Version 3.23.6

Limit size of dehoistable array indices. (Chromium issues 319835, 319860)

Limit the size for typed arrays to MaxSmi. (Chromium issue 319722)

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@17817 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index c6d59ef..dedd3fd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+2013-11-18: Version 3.23.6
+
+        Limit size of dehoistable array indices.
+        (Chromium issues 319835, 319860)
+
+        Limit the size for typed arrays to MaxSmi.
+        (Chromium issue 319722)
+
+        Performance and stability improvements on all platforms.
+
+
 2013-11-15: Version 3.23.5
 
         Fixed missing type feedback check for Generic*String addition.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index b8b1f49..871b419 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -517,12 +517,13 @@
       recorded_ast_id_(TypeFeedbackId::None()),
       positions_recorder_(this) {
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-  num_pending_reloc_info_ = 0;
+  num_pending_32_bit_reloc_info_ = 0;
   num_pending_64_bit_reloc_info_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
-  first_const_pool_use_ = -1;
+  first_const_pool_32_use_ = -1;
+  first_const_pool_64_use_ = -1;
   last_bound_pos_ = 0;
   ClearRecordedAstId();
 }
@@ -536,7 +537,7 @@
 void Assembler::GetCode(CodeDesc* desc) {
   // Emit constant pool if necessary.
   CheckConstPool(true, false);
-  ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_32_bit_reloc_info_ == 0);
   ASSERT(num_pending_64_bit_reloc_info_ == 0);
 
   // Set up code descriptor.
@@ -3149,14 +3150,19 @@
   // to relocate any emitted relocation entries.
 
   // Relocate pending relocation entries.
-  for (int i = 0; i < num_pending_reloc_info_; i++) {
-    RelocInfo& rinfo = pending_reloc_info_[i];
+  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+    RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION);
     if (rinfo.rmode() != RelocInfo::JS_RETURN) {
       rinfo.set_pc(rinfo.pc() + pc_delta);
     }
   }
+  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+    RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
+    ASSERT(rinfo.rmode() == RelocInfo::NONE64);
+    rinfo.set_pc(rinfo.pc() + pc_delta);
+  }
 }
 
 
@@ -3164,7 +3170,7 @@
   // No relocation info should be pending while using db. db is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using db.
-  ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_32_bit_reloc_info_ == 0);
   ASSERT(num_pending_64_bit_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
@@ -3176,7 +3182,7 @@
   // No relocation info should be pending while using dd. dd is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
-  ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_32_bit_reloc_info_ == 0);
   ASSERT(num_pending_64_bit_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
@@ -3246,15 +3252,19 @@
 
 
 void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
-  ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
-  if (num_pending_reloc_info_ == 0) {
-    first_const_pool_use_ = pc_offset();
-  }
-  pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
   if (rinfo.rmode() == RelocInfo::NONE64) {
-    ++num_pending_64_bit_reloc_info_;
+    ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+    if (num_pending_64_bit_reloc_info_ == 0) {
+      first_const_pool_64_use_ = pc_offset();
+    }
+    pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+  } else {
+    ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+    if (num_pending_32_bit_reloc_info_ == 0) {
+      first_const_pool_32_use_ = pc_offset();
+    }
+    pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
   }
-  ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
   // Make sure the constant pool is not emitted in place of the next
   // instruction for which we just recorded relocation info.
   BlockConstPoolFor(1);
@@ -3264,12 +3274,15 @@
 void Assembler::BlockConstPoolFor(int instructions) {
   int pc_limit = pc_offset() + instructions * kInstrSize;
   if (no_const_pool_before_ < pc_limit) {
-    // If there are some pending entries, the constant pool cannot be blocked
-    // further than constant pool instruction's reach.
-    ASSERT((num_pending_reloc_info_ == 0) ||
-           (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
-    // TODO(jfb) Also check 64-bit entries are in range (requires splitting
-    //           them up from 32-bit entries).
+    // Max pool start (if we need a jump and an alignment).
+#ifdef DEBUG
+    int start = pc_limit + kInstrSize + 2 * kPointerSize;
+    ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+           (start - first_const_pool_32_use_ +
+            num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
+    ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
+#endif
     no_const_pool_before_ = pc_limit;
   }
 
@@ -3290,8 +3303,8 @@
   }
 
   // There is nothing to do if there are no pending constant pool entries.
-  if (num_pending_reloc_info_ == 0)  {
-    ASSERT(num_pending_64_bit_reloc_info_ == 0);
+  if ((num_pending_32_bit_reloc_info_ == 0) &&
+      (num_pending_64_bit_reloc_info_ == 0)) {
     // Calculate the offset of the next check.
     next_buffer_check_ = pc_offset() + kCheckPoolInterval;
     return;
@@ -3300,24 +3313,18 @@
   // Check that the code buffer is large enough before emitting the constant
   // pool (including the jump over the pool, the constant pool marker and
   // the gap to the relocation information).
-  // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
   int jump_instr = require_jump ? kInstrSize : 0;
   int size_up_to_marker = jump_instr + kInstrSize;
-  int size_after_marker = num_pending_reloc_info_ * kPointerSize;
+  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
   bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
-  // 64-bit values must be 64-bit aligned.
-  // We'll start emitting at PC: branch+marker, then 32-bit values, then
-  // 64-bit values which might need to be aligned.
-  bool require_64_bit_align = has_fp_values &&
-      (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
-  if (require_64_bit_align) {
-    size_after_marker += kInstrSize;
+  bool require_64_bit_align = false;
+  if (has_fp_values) {
+    require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
+    if (require_64_bit_align) {
+      size_after_marker += kInstrSize;
+    }
+    size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
   }
-  // num_pending_reloc_info_ also contains 64-bit entries, the above code
-  // therefore already counted half of the size for 64-bit entries. Add the
-  // remaining size.
-  STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
-  size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
 
   int size = size_up_to_marker + size_after_marker;
 
@@ -3330,19 +3337,25 @@
   //  * the instruction doesn't require a jump after itself to jump over the
   //    constant pool, and we're getting close to running out of range.
   if (!force_emit) {
-    ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
-    int dist = pc_offset() + size - first_const_pool_use_;
+    ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
+    bool need_emit = false;
     if (has_fp_values) {
-      if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
-          (require_jump || (dist < kMaxDistToFPPool / 2))) {
-        return;
-      }
-    } else {
-      if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
-          (require_jump || (dist < kMaxDistToIntPool / 2))) {
-        return;
+      int dist64 = pc_offset() +
+                   size -
+                   num_pending_32_bit_reloc_info_ * kPointerSize -
+                   first_const_pool_64_use_;
+      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
+          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
+        need_emit = true;
       }
     }
+    int dist32 = pc_offset() + size - first_const_pool_32_use_;
+    if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
+        (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
+      need_emit = true;
+    }
+    if (!need_emit) return;
   }
 
   int needed_space = size + kGap;
@@ -3371,15 +3384,10 @@
 
     // Emit 64-bit constant pool entries first: their range is smaller than
     // that of 32-bit entries.
-    for (int i = 0; i < num_pending_reloc_info_; i++) {
-      RelocInfo& rinfo = pending_reloc_info_[i];
+    for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+      RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
 
-      if (rinfo.rmode() != RelocInfo::NONE64) {
-        // 32-bit values emitted later.
-        continue;
-      }
-
-      ASSERT(!((uintptr_t)pc_ & 0x3));  // Check 64-bit alignment.
+      ASSERT(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.
 
       Instr instr = instr_at(rinfo.pc());
       // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
@@ -3389,53 +3397,85 @@
       int delta = pc_ - rinfo.pc() - kPcLoadDelta;
       ASSERT(is_uint10(delta));
 
+      bool found = false;
+      uint64_t value = rinfo.raw_data64();
+      for (int j = 0; j < i; j++) {
+        RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
+        if (value == rinfo2.raw_data64()) {
+          found = true;
+          ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
+          Instr instr2 = instr_at(rinfo2.pc());
+          ASSERT(IsVldrDPcImmediateOffset(instr2));
+          delta = GetVldrDRegisterImmediateOffset(instr2);
+          delta += rinfo2.pc() - rinfo.pc();
+          break;
+        }
+      }
+
       instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
 
-      const double double_data = rinfo.data64();
-      uint64_t uint_data = 0;
-      OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
-      emit(uint_data & 0xFFFFFFFF);
-      emit(uint_data >> 32);
+      if (!found) {
+        uint64_t uint_data = rinfo.raw_data64();
+        emit(uint_data & 0xFFFFFFFF);
+        emit(uint_data >> 32);
+      }
     }
 
     // Emit 32-bit constant pool entries.
-    for (int i = 0; i < num_pending_reloc_info_; i++) {
-      RelocInfo& rinfo = pending_reloc_info_[i];
+    for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+      RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
       ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
              rinfo.rmode() != RelocInfo::POSITION &&
              rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
-             rinfo.rmode() != RelocInfo::CONST_POOL);
-
-      if (rinfo.rmode() == RelocInfo::NONE64) {
-        // 64-bit values emitted earlier.
-        continue;
-      }
+             rinfo.rmode() != RelocInfo::CONST_POOL &&
+             rinfo.rmode() != RelocInfo::NONE64);
 
       Instr instr = instr_at(rinfo.pc());
 
       // 64-bit loads shouldn't get here.
       ASSERT(!IsVldrDPcImmediateOffset(instr));
 
-      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
-      // 0 is the smallest delta:
-      //   ldr rd, [pc, #0]
-      //   constant pool marker
-      //   data
-
       if (IsLdrPcImmediateOffset(instr) &&
           GetLdrRegisterImmediateOffset(instr) == 0) {
+        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
         ASSERT(is_uint12(delta));
+        // 0 is the smallest delta:
+        //   ldr rd, [pc, #0]
+        //   constant pool marker
+        //   data
+
+        bool found = false;
+        if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
+          for (int j = 0; j < i; j++) {
+            RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
+
+            if ((rinfo2.data() == rinfo.data()) &&
+                (rinfo2.rmode() == rinfo.rmode())) {
+              Instr instr2 = instr_at(rinfo2.pc());
+              if (IsLdrPcImmediateOffset(instr2)) {
+                delta = GetLdrRegisterImmediateOffset(instr2);
+                delta += rinfo2.pc() - rinfo.pc();
+                found = true;
+                break;
+              }
+            }
+          }
+        }
+
         instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
-        emit(rinfo.data());
+
+        if (!found) {
+          emit(rinfo.data());
+        }
       } else {
         ASSERT(IsMovW(instr));
-        emit(rinfo.data());
       }
     }
 
-    num_pending_reloc_info_ = 0;
+    num_pending_32_bit_reloc_info_ = 0;
     num_pending_64_bit_reloc_info_ = 0;
-    first_const_pool_use_ = -1;
+    first_const_pool_32_use_ = -1;
+    first_const_pool_64_use_ = -1;
 
     RecordComment("]");
 
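A note on the scheme above: the single pending-relocation buffer is split
into separate 32-bit and 64-bit pools, and identical 64-bit constants now
share one pool slot by pointing later vldr instructions at the earlier
entry's offset. A minimal standalone sketch of the sharing idea (real
entries carry RelocInfo and pc-relative deltas, elided here):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      // Three pending vldr loads; two request the same bit pattern (1.0).
      std::vector<uint64_t> pending = {0x3FF0000000000000ull,   // 1.0
                                       0x4000000000000000ull,   // 2.0
                                       0x3FF0000000000000ull};  // 1.0 again
      std::vector<uint64_t> pool;
      for (uint64_t bits : pending) {
        bool found = false;
        for (uint64_t emitted : pool) {
          if (emitted == bits) { found = true; break; }  // reuse earlier slot
        }
        if (!found) pool.push_back(bits);  // emit a fresh slot
      }
      std::printf("%zu loads share %zu pool slots\n",
                  pending.size(), pool.size());  // prints: 3 loads, 2 slots
    }
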
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 9bfdc9e..55e6f9a 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1443,7 +1443,8 @@
   static const int kMaxDistToIntPool = 4*KB;
   static const int kMaxDistToFPPool = 1*KB;
   // All relocations could be integer; it therefore acts as the limit.
-  static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
+  static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
+  static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
 
   // Postpone the generation of the constant pool for the specified number of
   // instructions.
@@ -1481,11 +1482,16 @@
   // StartBlockConstPool to have an effect.
   void EndBlockConstPool() {
     if (--const_pool_blocked_nesting_ == 0) {
+#ifdef DEBUG
+      // Max pool start (if we need a jump and an alignment).
+      int start = pc_offset() + kInstrSize + 2 * kPointerSize;
       // Check the constant pool hasn't been blocked for too long.
-      ASSERT((num_pending_reloc_info_ == 0) ||
-             (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
+      ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+             (start + num_pending_64_bit_reloc_info_ * kDoubleSize <
+              (first_const_pool_32_use_ + kMaxDistToIntPool)));
       ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
-             (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
+             (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
+#endif
       // Two cases:
       //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
       //    still blocked
@@ -1534,7 +1540,8 @@
 
   // Keep track of the first instruction requiring a constant pool entry
   // since the previous constant pool was emitted.
-  int first_const_pool_use_;
+  int first_const_pool_32_use_;
+  int first_const_pool_64_use_;
 
   // Relocation info generation
   // Each relocation is encoded as a variable size value
@@ -1548,12 +1555,12 @@
   // If every instruction in a long sequence is accessing the pool, we need one
   // pending relocation entry per instruction.
 
-  // the buffer of pending relocation info
-  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
-  // number of pending reloc info entries in the buffer
-  int num_pending_reloc_info_;
-  // Number of pending reloc info entries included above which also happen to
-  // be 64-bit.
+  // The buffers of pending relocation info.
+  RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
+  RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
+  // Number of pending reloc info entries in the 32-bit buffer.
+  int num_pending_32_bit_reloc_info_;
+  // Number of pending reloc info entries in the 64-bit buffer.
   int num_pending_64_bit_reloc_info_;
 
   // The bound position, before this we cannot do instruction elimination.
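
The two distance constants above follow from the ARM pc-relative load
encodings: ldr takes a 12-bit byte offset, while vldr takes an 8-bit
offset counted in words. A sketch of that arithmetic (the encoding widths
are standard ARM, not defined in this patch):

    // ldr rd, [pc, #imm12]: 12-bit unsigned byte offset.
    constexpr int kLdrMaxOffset = (1 << 12) - 1;        // 4095 bytes
    // vldr dd, [pc, #imm8]: 8-bit offset scaled by 4.
    constexpr int kVldrMaxOffset = ((1 << 8) - 1) * 4;  // 1020 bytes
    static_assert(kLdrMaxOffset == 4 * 1024 - 1, "int pool reach ~ 4*KB");
    static_assert(kVldrMaxOffset == 1 * 1024 - 4, "FP pool reach ~ 1*KB");
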
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 8db65bc..3473445 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -113,6 +113,17 @@
 }
 
 
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r1, r0 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
 void LoadFieldStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -133,6 +144,19 @@
 }
 
 
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r2 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -5693,6 +5717,24 @@
 }
 
 
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+  __ mov(r1, r0);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ ldr(r0, MemOperand(fp, parameter_count_offset));
+  // The parameter count above includes the receiver for the arguments
+  // passed to the deoptimization handler. Subtract the receiver to get
+  // the parameter count for the call.
+  __ sub(r0, r0, Operand(1));
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  ParameterCount argument_count(r0);
+  __ InvokeFunction(
+      r1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
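
In the new tail-call trampoline above, the count stored in the stub-failure
frame includes the receiver, while InvokeFunction expects the argument
count without it; hence the subtraction by one. A worked sketch of the
accounting (values hypothetical):

    #include <cassert>

    int main() {
      int stack_values = 3;                      // receiver, a, b for f(a, b)
      int parameter_count = stack_values;        // what the frame records
      int argument_count = parameter_count - 1;  // what InvokeFunction gets
      assert(argument_count == 2);
    }
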
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 787fc81..53ba309 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1328,8 +1328,10 @@
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), r1);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LCallFunction(context, function), r0), instr);
+  LCallFunction* call = new(zone()) LCallFunction(context, function);
+  LInstruction* result = DefineFixed(call, r0);
+  if (instr->IsTailCall()) return result;
+  return MarkAsCall(result, instr);
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 207dd8c..56717a2 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -509,17 +509,36 @@
 }
 
 
+static int ArgumentsOffsetWithoutFrame(int index) {
+  ASSERT(index < 0);
+  return -(index + 1) * kPointerSize;
+}
+
+
 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
   ASSERT(!op->IsRegister());
   ASSERT(!op->IsDoubleRegister());
   ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
-  return MemOperand(fp, StackSlotOffset(op->index()));
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // With no eager frame, retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
 }
 
 
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
   ASSERT(op->IsDoubleStackSlot());
-  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // With no eager frame, retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(
+        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
 }
 
 
@@ -4107,7 +4126,12 @@
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ mov(sp, fp);
+    __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+  } else {
+    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  }
 }
 
 
@@ -5473,19 +5497,22 @@
     __ Push(Smi::FromInt(size));
   }
 
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
-                            instr->context());
+    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
-                            instr->context());
+    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
-    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
-                            instr->context());
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(r0, result);
 }
 
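ArgumentsOffsetWithoutFrame() above maps the negative stack-slot indices
used for parameters onto sp-relative offsets when no eager frame was
built. A self-contained sketch of the mapping (kPointerSize is 4 on ARM):

    #include <cassert>

    const int kPointerSize = 4;  // ARM

    int ArgumentsOffsetWithoutFrame(int index) {
      assert(index < 0);  // parameters use negative slot indices
      return -(index + 1) * kPointerSize;
    }

    int main() {
      assert(ArgumentsOffsetWithoutFrame(-1) == 0);  // first slot is at sp
      assert(ArgumentsOffsetWithoutFrame(-2) == kPointerSize);
      assert(ArgumentsOffsetWithoutFrame(-3) == 2 * kPointerSize);
    }
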
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 8bb4a08..ad5bd76 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1500,6 +1500,9 @@
 }
 
 
+// Compute the hash code from the untagged key.  This must be kept in sync
+// with ComputeIntegerHash in utils.h and KeyedLoadDictionaryElementStub in
+// code-stubs-hydrogen.cc.
 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
   // First of all we assign the hash seed to scratch.
   LoadRoot(scratch, Heap::kHashSeedRootIndex);
@@ -1566,8 +1569,7 @@
   sub(t1, t1, Operand(1));
 
   // Generate an unrolled loop that performs a few probes before giving up.
-  static const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
+  for (int i = 0; i < kNumberDictionaryProbes; i++) {
     // Use t2 for index calculations and keep the hash intact in t0.
     mov(t2, t0);
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -1584,7 +1586,7 @@
     add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
     ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
     cmp(key, Operand(ip));
-    if (i != kProbes - 1) {
+    if (i != kNumberDictionaryProbes - 1) {
       b(eq, &done);
     } else {
       b(ne, miss);
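
The unrolled loop above issues kNumberDictionaryProbes lookups using the
quadratic sequence noted in the code, (hash + i + i*i) & mask. A sketch
with concrete numbers (hash value hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t hash = 0x2b, mask = 0xf;  // 16-slot dictionary
      for (int i = 0; i < 4; i++) {            // kNumberDictionaryProbes == 4
        std::printf("probe %d -> slot %u\n", i, (hash + i + i * i) & mask);
      }
      // Prints slots 11, 13, 1, 7: the step between probes grows by 2 each
      // time, which keeps probe chains short on sparse dictionaries.
    }
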
diff --git a/src/assembler.h b/src/assembler.h
index 8873658..049fa8c 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -372,6 +372,9 @@
   Mode rmode() const {  return rmode_; }
   intptr_t data() const { return data_; }
   double data64() const { return data64_; }
+  uint64_t raw_data64() {
+    return BitCast<uint64_t>(data64_);
+  }
   Code* host() const { return host_; }
 
   // Apply a relocation by delta bytes
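
raw_data64() above returns the stored double reinterpreted as its IEEE-754
bit pattern (via BitCast), so 64-bit constants can be compared and emitted
exactly. A sketch of the same reinterpretation using plain memcpy:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    uint64_t RawBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // copy bits, no conversion
      return bits;
    }

    int main() {
      assert(RawBits(1.0) == 0x3FF0000000000000ull);
      // Bitwise comparison keeps -0.0 distinct from 0.0, which matters when
      // deduplicating constant pool entries.
      assert(RawBits(0.0) != RawBits(-0.0));
    }
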
diff --git a/src/ast.cc b/src/ast.cc
index 9deb71d..adf0fb8 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -776,7 +776,7 @@
   if (property == NULL) {
     // Function call.  Specialize for monomorphic calls.
     if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
-  } else {
+  } else if (property->key()->IsPropertyName()) {
     // Method call.  Specialize for the receiver types seen at runtime.
     Literal* key = property->key()->AsLiteral();
     ASSERT(key != NULL && key->value()->IsString());
@@ -803,6 +803,10 @@
       Handle<Map> map = receiver_types_.first();
       is_monomorphic_ = ComputeTarget(map, name);
     }
+  } else {
+    if (is_monomorphic_) {
+      keyed_array_call_is_holey_ = oracle->KeyedArrayCallIsHoley(this);
+    }
   }
 }
 
diff --git a/src/ast.h b/src/ast.h
index 42f6c8b..2a86696 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1728,6 +1728,7 @@
     return &receiver_types_;
   }
   virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+  bool KeyedArrayCallIsHoley() { return keyed_array_call_is_holey_; }
   CheckType check_type() const { return check_type_; }
 
   void set_string_check(Handle<JSObject> holder) {
@@ -1778,6 +1779,7 @@
         expression_(expression),
         arguments_(arguments),
         is_monomorphic_(false),
+        keyed_array_call_is_holey_(true),
         check_type_(RECEIVER_MAP_CHECK),
         return_id_(GetNextId(isolate)) { }
 
@@ -1786,6 +1788,7 @@
   ZoneList<Expression*>* arguments_;
 
   bool is_monomorphic_;
+  bool keyed_array_call_is_holey_;
   CheckType check_type_;
   SmallMapList receiver_types_;
   Handle<JSFunction> target_;
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index eb3bac8..a992dcb 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -210,7 +210,7 @@
 template <class Stub>
 class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
  public:
-  explicit CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
+  CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
       : CodeStubGraphBuilderBase(isolate, stub) {}
 
  protected:
@@ -594,6 +594,32 @@
 }
 
 
+template<>
+HValue* CodeStubGraphBuilder<KeyedArrayCallStub>::BuildCodeStub() {
+  int argc = casted_stub()->argc() + 1;
+  info()->set_parameter_count(argc);
+
+  HValue* receiver = Add<HParameter>(1);
+
+  // Load the expected initial array map from the context.
+  JSArrayBuilder array_builder(this, casted_stub()->elements_kind());
+  HValue* map = array_builder.EmitMapCode();
+
+  HValue* checked_receiver = Add<HCheckMapValue>(receiver, map);
+
+  HValue* function = BuildUncheckedMonomorphicElementAccess(
+      checked_receiver, GetParameter(0),
+      NULL, true, casted_stub()->elements_kind(),
+      false, NEVER_RETURN_HOLE, STANDARD_STORE);
+  return Add<HCallFunction>(function, argc, TAIL_CALL);
+}
+
+
+Handle<Code> KeyedArrayCallStub::GenerateCode(Isolate* isolate) {
+  return DoGenerateCode(isolate, this);
+}
+
+
 template <>
 HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
   BuildUncheckedMonomorphicElementAccess(
@@ -1277,4 +1303,20 @@
 }
 
 
+template<>
+HValue* CodeStubGraphBuilder<KeyedLoadDictionaryElementStub>::BuildCodeStub() {
+  HValue* receiver = GetParameter(0);
+  HValue* key = GetParameter(1);
+
+  Add<HCheckSmi>(key);
+
+  return BuildUncheckedDictionaryElementLoad(receiver, key);
+}
+
+
+Handle<Code> KeyedLoadDictionaryElementStub::GenerateCode(Isolate* isolate) {
+  return DoGenerateCode(isolate, this);
+}
+
+
 } }  // namespace v8::internal
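
The hydrogen-built dictionary load above guards the key with HCheckSmi
before probing the backing store. On 32-bit ports a smi is a 31-bit signed
integer, which bounds any key that can reach the probing code. A sketch of
the range implied by that guard (kSmiMaxValue stated here as an
assumption; element indices are additionally non-negative):

    #include <cassert>
    #include <cstdint>

    const int32_t kSmiMaxValue = (1 << 30) - 1;  // 31-bit smis on 32-bit ports

    bool KeyFitsInSmi(int64_t key) {
      return key >= 0 && key <= kSmiMaxValue;  // usable element index
    }

    int main() {
      assert(KeyFitsInSmi(0) && KeyFitsInSmi(kSmiMaxValue));
      assert(!KeyFitsInSmi(int64_t{1} << 31));  // too large to be a smi key
    }
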
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index f6e880f..166d46d 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -43,6 +43,7 @@
     : register_param_count_(-1),
       stack_parameter_count_(no_reg),
       hint_stack_parameter_count_(-1),
+      continuation_type_(NORMAL_CONTINUATION),
       function_mode_(NOT_JS_FUNCTION_STUB_MODE),
       register_params_(NULL),
       deoptimization_handler_(NULL),
@@ -51,6 +52,11 @@
       has_miss_handler_(false) { }
 
 
+void CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(Isolate* isolate) {
+  StubFailureTailCallTrampolineStub::GenerateAheadOfTime(isolate);
+}
+
+
 bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
   UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
   int index = stubs->FindEntry(GetKey());
@@ -958,7 +964,8 @@
 }
 
 
-void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+void KeyedLoadDictionaryElementPlatformStub::Generate(
+    MacroAssembler* masm) {
   KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
 }
 
@@ -1109,6 +1116,12 @@
 }
 
 
+void StubFailureTailCallTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
+  StubFailureTailCallTrampolineStub stub;
+  stub.GetCode(isolate)->set_is_pregenerated(true);
+}
+
+
 void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
                                                intptr_t stack_pointer,
                                                Isolate* isolate) {
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 8f1b686..0f35eec 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -90,13 +90,15 @@
   V(TransitionElementsKind)              \
   V(StoreArrayLiteralElement)            \
   V(StubFailureTrampoline)               \
+  V(StubFailureTailCallTrampoline)       \
   V(ArrayConstructor)                    \
   V(InternalArrayConstructor)            \
   V(ProfileEntryHook)                    \
   V(StoreGlobal)                         \
   /* IC Handler stubs */                 \
   V(LoadField)                           \
-  V(KeyedLoadField)
+  V(KeyedLoadField)                      \
+  V(KeyedArrayCall)
 
 // List of code stubs only used on ARM platforms.
 #if V8_TARGET_ARCH_ARM
@@ -170,6 +172,7 @@
   virtual bool IsPregenerated(Isolate* isolate) { return false; }
 
   static void GenerateStubsAheadOfTime(Isolate* isolate);
+  static void GenerateStubsRequiringBuiltinsAheadOfTime(Isolate* isolate);
   static void GenerateFPStubs(Isolate* isolate);
 
   // Some stubs put untagged junk on the stack that cannot be scanned by the
@@ -279,6 +282,9 @@
 enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
 enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
 
+enum ContinuationType { NORMAL_CONTINUATION, TAIL_CALL_CONTINUATION };
+
+
 struct CodeStubInterfaceDescriptor {
   CodeStubInterfaceDescriptor();
   int register_param_count_;
@@ -287,18 +293,23 @@
   // if hint_stack_parameter_count_ > 0, the code stub can optimize the
   // return sequence. Default value is -1, which means it is ignored.
   int hint_stack_parameter_count_;
+  ContinuationType continuation_type_;
   StubFunctionMode function_mode_;
   Register* register_params_;
 
   Address deoptimization_handler_;
   HandlerArgumentsMode handler_arguments_mode_;
 
+  bool initialized() const { return register_param_count_ >= 0; }
+
+  bool HasTailCallContinuation() const {
+    return continuation_type_ == TAIL_CALL_CONTINUATION;
+  }
+
   int environment_length() const {
     return register_param_count_;
   }
 
-  bool initialized() const { return register_param_count_ >= 0; }
-
   void SetMissHandler(ExternalReference handler) {
     miss_handler_ = handler;
     has_miss_handler_ = true;
@@ -876,6 +887,11 @@
  public:
   virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
   virtual int GetStubFlags() { return kind(); }
+
+ protected:
+  HandlerStub() : HICStub() { }
+  virtual int NotMissMinorKey() { return bit_field_; }
+  int bit_field_;
 };
 
 
@@ -937,9 +953,6 @@
   class IndexBits: public BitField<int, 5, 11> {};
   class UnboxedDoubleBits: public BitField<bool, 16, 1> {};
   virtual CodeStub::Major MajorKey() { return LoadField; }
-  virtual int NotMissMinorKey() { return bit_field_; }
-
-  int bit_field_;
 };
 
 
@@ -1018,6 +1031,50 @@
 };
 
 
+class KeyedArrayCallStub: public HICStub {
+ public:
+  KeyedArrayCallStub(bool holey, int argc) : HICStub(), argc_(argc) {
+    bit_field_ = ContextualBits::encode(false) | HoleyBits::encode(holey);
+  }
+
+  virtual Code::Kind kind() const { return Code::KEYED_CALL_IC; }
+  virtual Code::ExtraICState GetExtraICState() { return bit_field_; }
+
+  ElementsKind elements_kind() {
+    return HoleyBits::decode(bit_field_) ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+  }
+
+  int argc() { return argc_; }
+  virtual int GetStubFlags() { return argc(); }
+
+  static bool IsHoley(Handle<Code> code) {
+    Code::ExtraICState state = code->extra_ic_state();
+    return HoleyBits::decode(state);
+  }
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+  virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ private:
+  virtual int NotMissMinorKey() {
+    return GetExtraICState() | ArgcBits::encode(argc_);
+  }
+
+  class ContextualBits: public BitField<bool, 0, 1> {};
+  STATIC_ASSERT(CallICBase::Contextual::kShift == ContextualBits::kShift);
+  STATIC_ASSERT(CallICBase::Contextual::kSize == ContextualBits::kSize);
+  class HoleyBits: public BitField<bool, 1, 1> {};
+  STATIC_ASSERT(Code::kArgumentsBits <= kStubMinorKeyBits - 2);
+  class ArgcBits: public BitField<int, 2, Code::kArgumentsBits> {};
+  virtual CodeStub::Major MajorKey() { return KeyedArrayCall; }
+  int bit_field_;
+  int argc_;
+};
+
+
 class BinaryOpStub: public HydrogenCodeStub {
  public:
   BinaryOpStub(Token::Value op, OverwriteMode mode)
@@ -1782,17 +1839,35 @@
 };
 
 
-class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
+class KeyedLoadDictionaryElementStub : public HydrogenCodeStub {
  public:
   KeyedLoadDictionaryElementStub() {}
 
+  virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ private:
+  Major MajorKey() { return KeyedLoadElement; }
+  int NotMissMinorKey() { return DICTIONARY_ELEMENTS; }
+
+  DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+};
+
+
+class KeyedLoadDictionaryElementPlatformStub : public PlatformCodeStub {
+ public:
+  KeyedLoadDictionaryElementPlatformStub() {}
+
   void Generate(MacroAssembler* masm);
 
  private:
   Major MajorKey() { return KeyedLoadElement; }
   int MinorKey() { return DICTIONARY_ELEMENTS; }
 
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+  DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementPlatformStub);
 };
 
 
@@ -2399,6 +2474,27 @@
 };
 
 
+class StubFailureTailCallTrampolineStub : public PlatformCodeStub {
+ public:
+  StubFailureTailCallTrampolineStub() : fp_registers_(CanUseFPRegisters()) {}
+
+  virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+  class FPRegisters: public BitField<bool, 0, 1> {};
+  Major MajorKey() { return StubFailureTailCallTrampoline; }
+  int MinorKey() { return FPRegisters::encode(fp_registers_); }
+
+  void Generate(MacroAssembler* masm);
+
+  bool fp_registers_;
+
+  DISALLOW_COPY_AND_ASSIGN(StubFailureTailCallTrampolineStub);
+};
+
+
 class ProfileEntryHookStub : public PlatformCodeStub {
  public:
   explicit ProfileEntryHookStub() {}
diff --git a/src/codegen.h b/src/codegen.h
index ea20296..33672a2 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -112,6 +112,8 @@
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
 };
 
+static const int kNumberDictionaryProbes = 4;
+
 
 } }  // namespace v8::internal
 
diff --git a/src/compiler.cc b/src/compiler.cc
index e86baa0..1ec2626 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -59,7 +59,8 @@
     : flags_(LanguageModeField::encode(CLASSIC_MODE)),
       script_(script),
       osr_ast_id_(BailoutId::None()),
-      osr_pc_offset_(0) {
+      osr_pc_offset_(0),
+      parameter_count_(0) {
   Initialize(script->GetIsolate(), BASE, zone);
 }
 
@@ -70,7 +71,8 @@
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       osr_ast_id_(BailoutId::None()),
-      osr_pc_offset_(0) {
+      osr_pc_offset_(0),
+      parameter_count_(0) {
   Initialize(script_->GetIsolate(), BASE, zone);
 }
 
@@ -83,7 +85,8 @@
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       context_(closure->context()),
       osr_ast_id_(BailoutId::None()),
-      osr_pc_offset_(0) {
+      osr_pc_offset_(0),
+      parameter_count_(0) {
   Initialize(script_->GetIsolate(), BASE, zone);
 }
 
@@ -94,7 +97,8 @@
     : flags_(LanguageModeField::encode(CLASSIC_MODE) |
              IsLazy::encode(true)),
       osr_ast_id_(BailoutId::None()),
-      osr_pc_offset_(0) {
+      osr_pc_offset_(0),
+      parameter_count_(0) {
   Initialize(isolate, STUB, zone);
   code_stub_ = stub;
 }
@@ -184,8 +188,12 @@
 
 
 int CompilationInfo::num_parameters() const {
-  ASSERT(!IsStub());
-  return scope()->num_parameters();
+  if (IsStub()) {
+    ASSERT(parameter_count_ > 0);
+    return parameter_count_;
+  } else {
+    return scope()->num_parameters();
+  }
 }
 
 
diff --git a/src/compiler.h b/src/compiler.h
index 2d9e52a..5ebcfd9 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -98,6 +98,10 @@
     ASSERT(!is_lazy());
     flags_ |= IsGlobal::encode(true);
   }
+  void set_parameter_count(int parameter_count) {
+    ASSERT(IsStub());
+    parameter_count_ = parameter_count;
+  }
   void SetLanguageMode(LanguageMode language_mode) {
     ASSERT(this->language_mode() == CLASSIC_MODE ||
            this->language_mode() == language_mode ||
@@ -442,6 +446,9 @@
   // during graph optimization.
   int opt_count_;
 
+  // Number of parameters used for compilation of stubs that require arguments.
+  int parameter_count_;
+
   Handle<Foreign> object_wrapper_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
diff --git a/src/d8.cc b/src/d8.cc
index e855c85..d227ac3 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1358,6 +1358,9 @@
     if (strcmp(argv[i], "--stress-opt") == 0) {
       options.stress_opt = true;
       argv[i] = NULL;
+    } else if (strcmp(argv[i], "--nostress-opt") == 0) {
+      options.stress_opt = false;
+      argv[i] = NULL;
     } else if (strcmp(argv[i], "--stress-deopt") == 0) {
       options.stress_deopt = true;
       argv[i] = NULL;
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index ef4e39e..2f4d7dc 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1465,8 +1465,9 @@
   int output_frame_size = height_in_bytes + fixed_frame_size;
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(),
-           "  translating %s => StubFailureTrampolineStub, height=%d\n",
+           "  translating %s => StubFailure%sTrampolineStub, height=%d\n",
            CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+           descriptor->HasTailCallContinuation() ? "TailCall" : "",
            height_in_bytes);
   }
 
@@ -1538,7 +1539,8 @@
            top_address + output_frame_offset, output_frame_offset, value);
   }
 
-  intptr_t caller_arg_count = 0;
+  intptr_t caller_arg_count = descriptor->HasTailCallContinuation()
+      ? compiled_code_->arguments_count() + 1 : 0;
   bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
 
   // Build the Arguments object for the caller's parameters and a pointer to it.
@@ -1634,9 +1636,13 @@
 
   // Compute this frame's PC, state, and continuation.
   Code* trampoline = NULL;
-  StubFunctionMode function_mode = descriptor->function_mode_;
-  StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
-                                                           isolate_);
+  if (descriptor->HasTailCallContinuation()) {
+    StubFailureTailCallTrampolineStub().FindCodeInCache(&trampoline, isolate_);
+  } else {
+    StubFunctionMode function_mode = descriptor->function_mode_;
+    StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
+                                                             isolate_);
+  }
   ASSERT(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
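
For tail-call continuations the translated frame must cover the caller's
arguments, receiver included, which is why caller_arg_count becomes
arguments_count() + 1 rather than 0. A worked sketch (numbers
hypothetical):

    #include <cassert>

    int main() {
      int arguments_count = 2;                     // e.g. receiver[key](a, b)
      int caller_arg_count = arguments_count + 1;  // plus the receiver
      assert(caller_arg_count == 3);  // receiver, a, b on the caller's stack
      // Normal (non-tail-call) stub failures keep caller_arg_count == 0.
    }
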
diff --git a/src/elements-kind.cc b/src/elements-kind.cc
index 16f5bff..689c220 100644
--- a/src/elements-kind.cc
+++ b/src/elements-kind.cc
@@ -35,6 +35,36 @@
 namespace internal {
 
 
+int ElementsKindToShiftSize(ElementsKind elements_kind) {
+  switch (elements_kind) {
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      return 0;
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      return 1;
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+      return 2;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+      return 3;
+    case FAST_SMI_ELEMENTS:
+    case FAST_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS:
+    case DICTIONARY_ELEMENTS:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      return kPointerSizeLog2;
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
 const char* ElementsKindToString(ElementsKind kind) {
   ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
   return accessor->name();
diff --git a/src/elements-kind.h b/src/elements-kind.h
index f5280d6..51a6902 100644
--- a/src/elements-kind.h
+++ b/src/elements-kind.h
@@ -81,6 +81,7 @@
 const int kFastElementsKindPackedToHoley =
     FAST_HOLEY_SMI_ELEMENTS - FAST_SMI_ELEMENTS;
 
+int ElementsKindToShiftSize(ElementsKind elements_kind);
 const char* ElementsKindToString(ElementsKind kind);
 void PrintElementsKind(FILE* out, ElementsKind kind);
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 1781f7f..a5cf326 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -202,6 +202,8 @@
 // Flags for experimental implementation features.
 DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
 DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
+DEFINE_bool(compiled_keyed_dictionary_loads, false,
+            "use optimizing compiler to generate keyed dictionary load stubs")
 DEFINE_bool(clever_optimizations, true,
             "Optimize object size, Array shift, DOM strings and string +")
 DEFINE_bool(pretenuring, true, "allocate objects in old space")
diff --git a/src/frames.cc b/src/frames.cc
index 167277f..4abacad 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1401,6 +1401,11 @@
     return trampoline;
   }
 
+  StubFailureTailCallTrampolineStub().FindCodeInCache(&trampoline, isolate());
+  if (trampoline->contains(pc())) {
+    return trampoline;
+  }
+
   UNREACHABLE();
   return NULL;
 }
diff --git a/src/heap.cc b/src/heap.cc
index 66eeadb..dfb2478 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -3118,6 +3118,12 @@
 }
 
 
+void Heap::CreateStubsRequiringBuiltins() {
+  HandleScope scope(isolate());
+  CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(isolate());
+}
+
+
 bool Heap::CreateInitialObjects() {
   Object* obj;
 
diff --git a/src/heap.h b/src/heap.h
index ef1e415..0570557 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -617,9 +617,6 @@
     return old_data_space_->allocation_limit_address();
   }
 
-  // Uncommit unused semi space.
-  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
-
   // Allocates and initializes a new JavaScript object based on a
   // constructor.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1175,19 +1172,6 @@
   // Converts the given boolean condition to JavaScript boolean value.
   inline Object* ToBoolean(bool condition);
 
-  // Code that should be run before and after each GC.  Includes some
-  // reporting/verification activities when compiled with DEBUG set.
-  void GarbageCollectionPrologue();
-  void GarbageCollectionEpilogue();
-
-  // Performs garbage collection operation.
-  // Returns whether there is a chance that another major GC could
-  // collect more garbage.
-  bool CollectGarbage(AllocationSpace space,
-                      GarbageCollector collector,
-                      const char* gc_reason,
-                      const char* collector_reason);
-
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
@@ -1429,9 +1413,6 @@
 #endif
   }
 
-  // Fill in bogus values in from space
-  void ZapFromSpace();
-
   // Print short heap statistics.
   void PrintShortHeapStatistics();
 
@@ -1475,9 +1456,6 @@
   static inline void ScavengePointer(HeapObject** p);
   static inline void ScavengeObject(HeapObject** p, HeapObject* object);
 
-  // Commits from space if it is uncommitted.
-  void EnsureFromSpaceIsCommitted();
-
   // Support for partial snapshots.  After calling this we have a linear
   // space to write objects in each space.
   void ReserveSpace(int *sizes, Address* addresses);
@@ -2086,10 +2064,23 @@
     gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
   }
 
+  // Code that should be run before and after each GC.  Includes some
+  // reporting/verification activities when compiled with DEBUG set.
+  void GarbageCollectionPrologue();
+  void GarbageCollectionEpilogue();
+
   // Checks whether a global GC is necessary
   GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                           const char** reason);
 
+  // Performs garbage collection operation.
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+  bool CollectGarbage(AllocationSpace space,
+                      GarbageCollector collector,
+                      const char* gc_reason,
+                      const char* collector_reason);
+
   // Performs garbage collection
   // Returns whether there is a chance another major GC could
   // collect more garbage.
@@ -2135,6 +2126,7 @@
   NO_INLINE(void CreateJSConstructEntryStub());
 
   void CreateFixedStubs();
+  void CreateStubsRequiringBuiltins();
 
   MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
                                              Object* to_number,
@@ -2168,6 +2160,15 @@
   // Performs a minor collection in new generation.
   void Scavenge();
 
+  // Commits from space if it is uncommitted.
+  void EnsureFromSpaceIsCommitted();
+
+  // Uncommit unused semi space.
+  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+  // Fill in bogus values in from space
+  void ZapFromSpace();
+
   static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
       Heap* heap,
       Object** pointer);
diff --git a/src/hydrogen-dehoist.cc b/src/hydrogen-dehoist.cc
index 67e6718..bdf2cfb 100644
--- a/src/hydrogen-dehoist.cc
+++ b/src/hydrogen-dehoist.cc
@@ -53,7 +53,7 @@
   int32_t value = constant->Integer32Value() * sign;
   // We limit offset values to 30 bits because we want to avoid the risk of
   // overflows when the offset is added to the object header size.
-  if (value >= 1 << 30 || value < 0) return;
+  if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return;
   array_operation->SetKey(subexpression);
   if (binary_operation->HasNoUses()) {
     binary_operation->DeleteAndReplaceWith(NULL);
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 39d5958..38eabda 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -521,12 +521,19 @@
 
 
 bool HValue::IsInteger32Constant() {
-  return IsConstant() && HConstant::cast(this)->HasInteger32Value();
+  HValue* value_to_check = IsForceRepresentation()
+      ? HForceRepresentation::cast(this)->value()
+      : this;
+  return value_to_check->IsConstant() &&
+      HConstant::cast(value_to_check)->HasInteger32Value();
 }
 
 
 int32_t HValue::GetInteger32Constant() {
-  return HConstant::cast(this)->Integer32Value();
+  HValue* constant_value = IsForceRepresentation()
+      ? HForceRepresentation::cast(this)->value()
+      : this;
+  return HConstant::cast(constant_value)->Integer32Value();
 }
 
 
@@ -3397,7 +3404,7 @@
     }
   }
 
-  if (new_dominator_size > Page::kMaxNonCodeHeapObjectSize) {
+  if (new_dominator_size > isolate()->heap()->MaxRegularSpaceAllocationSize()) {
     if (FLAG_trace_allocation_folding) {
       PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
           id(), Mnemonic(), dominator_allocate->id(),
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 9bd0b90..6b92d5b 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1579,9 +1579,6 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
 
- protected:
-  virtual int RedefinedOperandIndex() { return 0; }
-
  private:
   HForceRepresentation(HValue* value, Representation required_representation) {
     SetOperandAt(0, value);
@@ -2289,19 +2286,38 @@
 };
 
 
+enum CallMode {
+  NORMAL_CALL,
+  TAIL_CALL
+};
+
+
 class HCallFunction V8_FINAL : public HBinaryCall {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
+      HCallFunction, HValue*, int, CallMode);
+
+  bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
 
   HValue* context() { return first(); }
   HValue* function() { return second(); }
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction)
 
- private:
-  HCallFunction(HValue* context, HValue* function, int argument_count)
-      : HBinaryCall(context, function, argument_count) {
+  virtual int argument_delta() const V8_OVERRIDE {
+    if (IsTailCall()) return 0;
+    return -argument_count();
   }
+
+ private:
+  HCallFunction(HValue* context,
+                HValue* function,
+                int argument_count,
+                CallMode mode = NORMAL_CALL)
+      : HBinaryCall(context, function, argument_count), call_mode_(mode) {
+  }
+  CallMode call_mode_;
 };
 
 
@@ -6125,6 +6141,7 @@
   virtual HValue* GetKey() = 0;
   virtual void SetKey(HValue* key) = 0;
   virtual void SetIndexOffset(uint32_t index_offset) = 0;
+  virtual int MaxIndexOffsetBits() = 0;
   virtual bool IsDehoisted() = 0;
   virtual void SetDehoisted(bool is_dehoisted) = 0;
   virtual ~ArrayInstructionInterface() { };
@@ -6164,6 +6181,9 @@
   void SetIndexOffset(uint32_t index_offset) {
     bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
   }
+  virtual int MaxIndexOffsetBits() {
+    return kBitsForIndexOffset;
+  }
   HValue* GetKey() { return key(); }
   void SetKey(HValue* key) { SetOperandAt(1, key); }
   bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
@@ -6550,6 +6570,9 @@
   ElementsKind elements_kind() const { return elements_kind_; }
   uint32_t index_offset() { return index_offset_; }
   void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  virtual int MaxIndexOffsetBits() {
+    return 31 - ElementsKindToShiftSize(elements_kind_);
+  }
   HValue* GetKey() { return key(); }
   void SetKey(HValue* key) { SetOperandAt(1, key); }
   bool IsDehoisted() { return is_dehoisted_; }
@@ -7183,6 +7206,8 @@
   DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
 
  protected:
+  virtual int RedefinedOperandIndex() { return 0; }
+
   virtual bool DataEquals(HValue* other) V8_OVERRIDE {
     return true;
   }
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 67bea00..b544904 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -605,7 +605,8 @@
           block->predecessors()->first()->last_environment()->ast_id();
       for (int k = 0; k < block->predecessors()->length(); k++) {
         HBasicBlock* predecessor = block->predecessors()->at(k);
-        ASSERT(predecessor->end()->IsGoto());
+        ASSERT(predecessor->end()->IsGoto() ||
+               predecessor->end()->IsDeoptimize());
         ASSERT(predecessor->last_environment()->ast_id() == id);
       }
     }
@@ -745,19 +746,20 @@
 HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
     : builder_(builder),
       finished_(false),
-      deopt_then_(false),
-      deopt_else_(false),
       did_then_(false),
       did_else_(false),
+      did_else_if_(false),
       did_and_(false),
       did_or_(false),
       captured_(false),
       needs_compare_(true),
+      pending_merge_block_(false),
       split_edge_merge_block_(NULL),
-      merge_block_(NULL) {
+      merge_at_join_blocks_(NULL),
+      normal_merge_at_join_block_count_(0),
+      deopt_merge_at_join_block_count_(0) {
   HEnvironment* env = builder->environment();
   first_true_block_ = builder->CreateBasicBlock(env->Copy());
-  last_true_block_ = NULL;
   first_false_block_ = builder->CreateBasicBlock(env->Copy());
 }
 
@@ -767,19 +769,20 @@
     HIfContinuation* continuation)
     : builder_(builder),
       finished_(false),
-      deopt_then_(false),
-      deopt_else_(false),
       did_then_(false),
       did_else_(false),
+      did_else_if_(false),
       did_and_(false),
       did_or_(false),
       captured_(false),
       needs_compare_(false),
+      pending_merge_block_(false),
       first_true_block_(NULL),
-      last_true_block_(NULL),
       first_false_block_(NULL),
       split_edge_merge_block_(NULL),
-      merge_block_(NULL) {
+      merge_at_join_blocks_(NULL),
+      normal_merge_at_join_block_count_(0),
+      deopt_merge_at_join_block_count_(0) {
   continuation->Continue(&first_true_block_,
                          &first_false_block_);
 }
@@ -787,6 +790,20 @@
 
 HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
     HControlInstruction* compare) {
+  ASSERT(did_then_ == did_else_);
+  if (did_else_) {
+    // Handle if-then-elseif
+    did_else_if_ = true;
+    did_else_ = false;
+    did_then_ = false;
+    did_and_ = false;
+    did_or_ = false;
+    pending_merge_block_ = false;
+    split_edge_merge_block_ = NULL;
+    HEnvironment* env = builder_->environment();
+    first_true_block_ = builder_->CreateBasicBlock(env->Copy());
+    first_false_block_ = builder_->CreateBasicBlock(env->Copy());
+  }
   if (split_edge_merge_block_ != NULL) {
     HEnvironment* env = first_false_block_->last_environment();
     HBasicBlock* split_edge =
@@ -842,29 +859,30 @@
 
 void HGraphBuilder::IfBuilder::CaptureContinuation(
     HIfContinuation* continuation) {
+  ASSERT(!did_else_if_);
   ASSERT(!finished_);
   ASSERT(!captured_);
-  HBasicBlock* true_block = last_true_block_ == NULL
-      ? first_true_block_
-      : last_true_block_;
-  HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
-      ? builder_->current_block()
-      : first_false_block_;
+
+  HBasicBlock* true_block = NULL;
+  HBasicBlock* false_block = NULL;
+  Finish(&true_block, &false_block);
+  ASSERT(true_block != NULL);
+  ASSERT(false_block != NULL);
   continuation->Capture(true_block, false_block);
   captured_ = true;
+  builder_->set_current_block(NULL);
   End();
 }
 
 
 void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
+  ASSERT(!did_else_if_);
   ASSERT(!finished_);
   ASSERT(!captured_);
-  ASSERT(did_then_);
-  if (!did_else_) Else();
-  HBasicBlock* true_block = last_true_block_ == NULL
-      ? first_true_block_
-      : last_true_block_;
-  HBasicBlock* false_block = builder_->current_block();
+  HBasicBlock* true_block = NULL;
+  HBasicBlock* false_block = NULL;
+  Finish(&true_block, &false_block);
+  merge_at_join_blocks_ = NULL;
   if (true_block != NULL && !true_block->IsFinished()) {
     ASSERT(continuation->IsTrueReachable());
     builder_->GotoNoSimulate(true_block, continuation->true_branch());
@@ -895,6 +913,7 @@
     builder_->FinishCurrentBlock(branch);
   }
   builder_->set_current_block(first_true_block_);
+  pending_merge_block_ = true;
 }
 
 
@@ -902,20 +921,17 @@
   ASSERT(did_then_);
   ASSERT(!captured_);
   ASSERT(!finished_);
-  last_true_block_ = builder_->current_block();
+  AddMergeAtJoinBlock(false);
   builder_->set_current_block(first_false_block_);
+  pending_merge_block_ = true;
   did_else_ = true;
 }
 
 
 void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
   ASSERT(did_then_);
-  if (did_else_) {
-    deopt_else_ = true;
-  } else {
-    deopt_then_ = true;
-  }
   builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+  AddMergeAtJoinBlock(true);
 }
 
 
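In the rewritten builder (Deopt() above, AddMergeAtJoinBlock() and End()
below), every arm that falls off the end of Then()/Else(), returns, or
deoptimizes records a MergeAtJoinBlock; End() only materializes a merge
block when more than one arm survives. A sketch of the usage pattern this
supports (method names as used elsewhere in this file; the condition class
is illustrative):

    // IfBuilder if_smi(this);
    // if_smi.If<HIsSmiAndBranch>(value);
    // if_smi.Then();
    //   ...  // smi path; may Return() or Deopt() instead of falling through
    // if_smi.Else();
    //   ...  // non-smi path
    // if_smi.End();  // creates a merge block only if both arms reach here
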
@@ -923,51 +939,99 @@
   HValue* parameter_count = builder_->graph()->GetConstantMinus1();
   builder_->FinishExitCurrentBlock(
       builder_->New<HReturn>(value, parameter_count));
-  if (did_else_) {
-    first_false_block_ = NULL;
-  } else {
-    first_true_block_ = NULL;
+  AddMergeAtJoinBlock(false);
+}
+
+
+void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) {
+  if (!pending_merge_block_) return;
+  HBasicBlock* block = builder_->current_block();
+  ASSERT(block == NULL || !block->IsFinished());
+  MergeAtJoinBlock* record =
+      new(builder_->zone()) MergeAtJoinBlock(block, deopt,
+                                             merge_at_join_blocks_);
+  merge_at_join_blocks_ = record;
+  if (block != NULL) {
+    ASSERT(block->end() == NULL);
+    if (deopt) {
+      deopt_merge_at_join_block_count_++;
+    } else {
+      normal_merge_at_join_block_count_++;
+    }
   }
+  builder_->set_current_block(NULL);
+  pending_merge_block_ = false;
+}
+
+
+void HGraphBuilder::IfBuilder::Finish() {
+  ASSERT(!finished_);
+  if (!did_then_) {
+    Then();
+  }
+  AddMergeAtJoinBlock(false);
+  if (!did_else_) {
+    Else();
+    AddMergeAtJoinBlock(false);
+  }
+  finished_ = true;
+}
+
+
+void HGraphBuilder::IfBuilder::Finish(HBasicBlock** then_continuation,
+                                      HBasicBlock** else_continuation) {
+  Finish();
+
+  MergeAtJoinBlock* else_record = merge_at_join_blocks_;
+  if (else_continuation != NULL) {
+    *else_continuation = else_record->block_;
+  }
+  MergeAtJoinBlock* then_record = else_record->next_;
+  if (then_continuation != NULL) {
+    *then_continuation = then_record->block_;
+  }
+  ASSERT(then_record->next_ == NULL);
 }
 
 
 void HGraphBuilder::IfBuilder::End() {
-  if (!captured_) {
-    ASSERT(did_then_);
-    if (!did_else_) {
-      last_true_block_ = builder_->current_block();
-    }
-    if (last_true_block_ == NULL || last_true_block_->IsFinished()) {
-      ASSERT(did_else_);
-      // Return on true. Nothing to do, just continue the false block.
-    } else if (first_false_block_ == NULL ||
-               (did_else_ && builder_->current_block()->IsFinished())) {
-      // Deopt on false. Nothing to do except switching to the true block.
-      builder_->set_current_block(last_true_block_);
-    } else {
-      merge_block_ = builder_->graph()->CreateBasicBlock();
-      ASSERT(!finished_);
-      if (!did_else_) Else();
-      ASSERT(!last_true_block_->IsFinished());
-      HBasicBlock* last_false_block = builder_->current_block();
-      ASSERT(!last_false_block->IsFinished());
-      if (deopt_then_) {
-        builder_->GotoNoSimulate(last_false_block, merge_block_);
-        builder_->PadEnvironmentForContinuation(last_true_block_,
-                                                merge_block_);
-        builder_->GotoNoSimulate(last_true_block_, merge_block_);
-      } else {
-        builder_->GotoNoSimulate(last_true_block_, merge_block_);
-        if (deopt_else_) {
-          builder_->PadEnvironmentForContinuation(last_false_block,
-                                                  merge_block_);
-        }
-        builder_->GotoNoSimulate(last_false_block, merge_block_);
+  if (captured_) return;
+  Finish();
+
+  int total_merged_blocks = normal_merge_at_join_block_count_ +
+    deopt_merge_at_join_block_count_;
+  ASSERT(total_merged_blocks >= 1);
+  HBasicBlock* merge_block = total_merged_blocks == 1
+      ? NULL : builder_->graph()->CreateBasicBlock();
+
+  // Merge non-deopt blocks first to ensure environment has right size for
+  // padding.
+  MergeAtJoinBlock* current = merge_at_join_blocks_;
+  while (current != NULL) {
+    if (!current->deopt_ && current->block_ != NULL) {
+      // If there is only one block that makes it through to the end of the
+      // if, then just set it as the current block and continue rather than
+      // creating an unnecessary merge block.
+      if (total_merged_blocks == 1) {
+        builder_->set_current_block(current->block_);
+        return;
       }
-      builder_->set_current_block(merge_block_);
+      builder_->GotoNoSimulate(current->block_, merge_block);
     }
+    current = current->next_;
   }
-  finished_ = true;
+
+  // Merge deopt blocks, padding when necessary.
+  current = merge_at_join_blocks_;
+  while (current != NULL) {
+    if (current->deopt_ && current->block_ != NULL) {
+      builder_->PadEnvironmentForContinuation(current->block_,
+                                              merge_block);
+      builder_->GotoNoSimulate(current->block_, merge_block);
+    }
+    current = current->next_;
+  }
+  builder_->set_current_block(merge_block);
 }
 
 
@@ -1051,6 +1115,7 @@
   }
 
   builder_->GotoNoSimulate(exit_trampoline_block_);
+  builder_->set_current_block(NULL);
 }
 
 
@@ -1340,6 +1405,138 @@
 }
 
 
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
+    HValue* elements,
+    HValue* key,
+    HValue* hash,
+    HValue* mask,
+    int current_probe) {
+  if (current_probe == kNumberDictionaryProbes) {
+    return NULL;
+  }
+
+  int32_t offset = SeededNumberDictionary::GetProbeOffset(current_probe);
+  HValue* raw_index = (current_probe == 0)
+      ? hash
+      : Add<HAdd>(hash, Add<HConstant>(offset));
+  raw_index = Add<HBitwise>(Token::BIT_AND, raw_index, mask);
+  int32_t entry_size = SeededNumberDictionary::kEntrySize;
+  raw_index = Add<HMul>(raw_index, Add<HConstant>(entry_size));
+  raw_index->ClearFlag(HValue::kCanOverflow);
+
+  int32_t base_offset = SeededNumberDictionary::kElementsStartIndex;
+  HValue* key_index = Add<HAdd>(raw_index, Add<HConstant>(base_offset));
+  key_index->ClearFlag(HValue::kCanOverflow);
+
+  HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
+                                          static_cast<HValue*>(NULL),
+                                          FAST_SMI_ELEMENTS);
+
+  IfBuilder key_compare(this);
+  key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
+  key_compare.Then();
+  {
+    // Key at the current probe doesn't match; try the next probe.
+    HValue* result = BuildUncheckedDictionaryElementLoadHelper(
+        elements, key, hash, mask, current_probe + 1);
+    if (result == NULL) {
+      key_compare.Deopt("probes exhausted in keyed load dictionary lookup");
+      result = graph()->GetConstantUndefined();
+    } else {
+      Push(result);
+    }
+  }
+  key_compare.Else();
+  {
+    // Key at the current probe matches. Details must be zero; otherwise
+    // the dictionary element requires special handling.
+    HValue* details_index = Add<HAdd>(raw_index,
+                                      Add<HConstant>(base_offset + 2));
+    details_index->ClearFlag(HValue::kCanOverflow);
+
+    HValue* details = Add<HLoadKeyed>(elements, details_index,
+                                      static_cast<HValue*>(NULL),
+                                      FAST_SMI_ELEMENTS);
+    IfBuilder details_compare(this);
+    details_compare.If<HCompareNumericAndBranch>(details,
+                                                 graph()->GetConstant0(),
+                                                 Token::NE);
+    details_compare.ThenDeopt("keyed load dictionary element not fast case");
+
+    details_compare.Else();
+    {
+      // Key matches and details are zero --> fast case. Load and return the
+      // value.
+      HValue* result_index = Add<HAdd>(raw_index,
+                                       Add<HConstant>(base_offset + 1));
+      result_index->ClearFlag(HValue::kCanOverflow);
+
+      Push(Add<HLoadKeyed>(elements, result_index,
+                           static_cast<HValue*>(NULL),
+                           FAST_ELEMENTS));
+    }
+    details_compare.End();
+  }
+  key_compare.End();
+
+  return Pop();
+}
+
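
The probe arithmetic above assumes the dictionary's flat layout: each entry
occupies SeededNumberDictionary::kEntrySize == 3 consecutive slots (key,
value at +1, details at +2), starting at kElementsStartIndex past the
hash-table header. A small sketch of the slot math; the header size here is
an assumption, the entry shape comes from the code above:

    #include <cstdio>

    const int kEntrySize = 3;           // key, value, details (as above)
    const int kElementsStartIndex = 4;  // hash-table header size (assumed)

    int main() {
      int entry = 2;  // chosen by (hash + probe offset) & mask
      int key_index = entry * kEntrySize + kElementsStartIndex;
      printf("key at %d, value at %d, details at %d\n",
             key_index, key_index + 1, key_index + 2);
      return 0;
    }
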
+
+HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
+  int32_t seed_value = static_cast<int32_t>(isolate()->heap()->HashSeed());
+  HValue* seed = Add<HConstant>(seed_value);
+  HValue* hash = Add<HBitwise>(Token::BIT_XOR, index, seed);
+
+  // hash = ~hash + (hash << 15);
+  HValue* shifted_hash = Add<HShl>(hash, Add<HConstant>(15));
+  HValue* not_hash = Add<HBitwise>(Token::BIT_XOR, hash,
+                                   graph()->GetConstantMinus1());
+  hash = Add<HAdd>(shifted_hash, not_hash);
+
+  // hash = hash ^ (hash >> 12);
+  shifted_hash = Add<HShr>(hash, Add<HConstant>(12));
+  hash = Add<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+
+  // hash = hash + (hash << 2);
+  shifted_hash = Add<HShl>(hash, Add<HConstant>(2));
+  hash = Add<HAdd>(hash, shifted_hash);
+
+  // hash = hash ^ (hash >> 4);
+  shifted_hash = Add<HShr>(hash, Add<HConstant>(4));
+  hash = Add<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+
+  // hash = hash * 2057;
+  hash = Add<HMul>(hash, Add<HConstant>(2057));
+  hash->ClearFlag(HValue::kCanOverflow);
+
+  // hash = hash ^ (hash >> 16);
+  shifted_hash = Add<HShr>(hash, Add<HConstant>(16));
+  return Add<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+}
+
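
BuildElementIndexHash emits, node by node, the integer hash it must stay in
sync with (ComputeIntegerHash in utils.h); the only graph-level twist is
that ~hash is expressed as hash ^ -1. A scalar version for reference,
believed equivalent, with all arithmetic mod 2^32:

    #include <cstdint>
    #include <cstdio>

    static uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;  // overflow wraps, matching kCanOverflow cleared
      hash = hash ^ (hash >> 16);
      return hash;
    }

    int main() {
      printf("%u\n", ComputeIntegerHash(42, 0xdeadbeefu));
      return 0;
    }
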
+
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
+                                                           HValue* key) {
+  HValue* elements = AddLoadElements(receiver);
+
+  HValue* hash = BuildElementIndexHash(key);
+
+  HValue* capacity = Add<HLoadKeyed>(
+      elements,
+      Add<HConstant>(NameDictionary::kCapacityIndex),
+      static_cast<HValue*>(NULL),
+      FAST_SMI_ELEMENTS);
+
+  HValue* mask = Add<HSub>(capacity, graph()->GetConstant1());
+  mask->ChangeRepresentation(Representation::Integer32());
+  mask->ClearFlag(HValue::kCanOverflow);
+
+  return BuildUncheckedDictionaryElementLoadHelper(elements, key,
+                                                   hash, mask, 0);
+}
+
+
 HValue* HGraphBuilder::BuildNumberToString(HValue* object,
                                            Handle<Type> type) {
   NoObservableSideEffectsScope scope(this);
@@ -1859,6 +2056,7 @@
       HInstruction* result = AddElementAccess(
           external_elements, key, val, bounds_check, elements_kind, is_store);
       negative_checker.ElseDeopt("Negative key encountered");
+      negative_checker.End();
       length_checker.End();
       return result;
     } else {
@@ -1940,9 +2138,10 @@
       IsFastPackedElementsKind(array_builder->kind())) {
     // We'll come back later with better (holey) feedback.
     if_builder.Deopt("Holey array despite packed elements_kind feedback");
+  } else {
+    Push(checked_length);         // capacity
+    Push(checked_length);         // length
   }
-  Push(checked_length);         // capacity
-  Push(checked_length);         // length
   if_builder.End();
 
   // Figure out total size
@@ -2142,15 +2341,12 @@
   static const int kLoopUnfoldLimit = 8;
   STATIC_ASSERT(JSArray::kPreallocatedArrayElements <= kLoopUnfoldLimit);
   int initial_capacity = -1;
-  if (from->ActualValue()->IsConstant() && to->ActualValue()->IsConstant()) {
-    HConstant* constant_from = HConstant::cast(from->ActualValue());
-    HConstant* constant_to = HConstant::cast(to->ActualValue());
+  if (from->IsInteger32Constant() && to->IsInteger32Constant()) {
+    int constant_from = from->GetInteger32Constant();
+    int constant_to = to->GetInteger32Constant();
 
-    if (constant_from->HasInteger32Value() &&
-        constant_from->Integer32Value() == 0 &&
-        constant_to->HasInteger32Value() &&
-        constant_to->Integer32Value() <= kLoopUnfoldLimit) {
-      initial_capacity = constant_to->Integer32Value();
+    if (constant_from == 0 && constant_to <= kLoopUnfoldLimit) {
+      initial_capacity = constant_to;
     }
   }
 
@@ -2426,7 +2622,7 @@
     return builder()->Add<HConstant>(map);
   }
 
-  if (kind_ == GetInitialFastElementsKind()) {
+  if (constructor_function_ != NULL && kind_ == GetInitialFastElementsKind()) {
     // No need for a context lookup if the kind_ matches the initial
     // map, because we can just load the map in that case.
     HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
@@ -7404,18 +7600,30 @@
   if (prop != NULL) {
     if (!prop->key()->IsPropertyName()) {
       // Keyed function call.
-      CHECK_ALIVE(VisitArgument(prop->obj()));
-
+      CHECK_ALIVE(VisitForValue(prop->obj()));
       CHECK_ALIVE(VisitForValue(prop->key()));
+
       // Push the receiver and key as the non-optimized code generator expects them.
       HValue* key = Pop();
       HValue* receiver = Pop();
       Push(key);
-      Push(receiver);
-
+      Push(Add<HPushArgument>(receiver));
       CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-      call = New<HCallKeyed>(key, argument_count);
+      if (expr->IsMonomorphic()) {
+        BuildCheckHeapObject(receiver);
+        ElementsKind kind = expr->KeyedArrayCallIsHoley()
+            ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+
+        Handle<Map> map(isolate()->get_initial_js_array_map(kind));
+
+        HValue* function = BuildMonomorphicElementAccess(
+            receiver, key, NULL, NULL, map, false, STANDARD_STORE);
+
+        call = New<HCallFunction>(function, argument_count);
+      } else {
+        call = New<HCallKeyed>(key, argument_count);
+      }
       Drop(argument_count + 1);  // 1 is the key.
       return ast_context()->ReturnInstruction(call, expr->id());
     }
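
The monomorphic branch above targets keyed calls of the shape
receiver[key](...) on arrays with one of the initial maps: instead of the
generic HCallKeyed IC, the callee is fetched with an ordinary monomorphic
element access and invoked through HCallFunction, with holey-versus-packed
feedback choosing the elements kind. The pattern being optimized, as a C++
dispatch-table analogue:

    #include <cstdio>

    // Analogue of the JS pattern `handlers[i](x)`: load the element,
    // then call it directly.
    typedef void (*Handler)(int);
    static void OnZero(int v) { printf("zero: %d\n", v); }
    static void OnOne(int v)  { printf("one: %d\n", v); }

    int main() {
      Handler handlers[] = { OnZero, OnOne };
      int key = 1;
      handlers[key](42);  // prints "one: 42"
      return 0;
    }
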
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 7987a97..0ea3b4e 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -953,7 +953,10 @@
 
 class HIfContinuation V8_FINAL {
  public:
-  HIfContinuation() : continuation_captured_(false) {}
+  HIfContinuation()
+    : continuation_captured_(false),
+      true_branch_(NULL),
+      false_branch_(NULL) {}
   HIfContinuation(HBasicBlock* true_branch,
                   HBasicBlock* false_branch)
       : continuation_captured_(true), true_branch_(true_branch),
@@ -1276,6 +1279,9 @@
 
   HValue* BuildNumberToString(HValue* object, Handle<Type> type);
 
+  HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
+                                              HValue* key);
+
   // Computes the size for a sequential string of the given length and encoding.
   HValue* BuildSeqStringSizeFor(HValue* length,
                                 String::Encoding encoding);
@@ -1493,6 +1499,10 @@
     void End();
 
     void Deopt(const char* reason);
+    void ThenDeopt(const char* reason) {
+      Then();
+      Deopt(reason);
+    }
     void ElseDeopt(const char* reason) {
       Else();
       Deopt(reason);
@@ -1505,21 +1515,41 @@
 
     HGraphBuilder* builder() const { return builder_; }
 
+    void AddMergeAtJoinBlock(bool deopt);
+
+    void Finish();
+    void Finish(HBasicBlock** then_continuation,
+                HBasicBlock** else_continuation);
+
+    class MergeAtJoinBlock : public ZoneObject {
+     public:
+      MergeAtJoinBlock(HBasicBlock* block,
+                       bool deopt,
+                       MergeAtJoinBlock* next)
+        : block_(block),
+          deopt_(deopt),
+          next_(next) {}
+      HBasicBlock* block_;
+      bool deopt_;
+      MergeAtJoinBlock* next_;
+    };
+
     HGraphBuilder* builder_;
     bool finished_ : 1;
-    bool deopt_then_ : 1;
-    bool deopt_else_ : 1;
     bool did_then_ : 1;
     bool did_else_ : 1;
+    bool did_else_if_ : 1;
     bool did_and_ : 1;
     bool did_or_ : 1;
     bool captured_ : 1;
     bool needs_compare_ : 1;
+    bool pending_merge_block_ : 1;
     HBasicBlock* first_true_block_;
-    HBasicBlock* last_true_block_;
     HBasicBlock* first_false_block_;
     HBasicBlock* split_edge_merge_block_;
-    HBasicBlock* merge_block_;
+    MergeAtJoinBlock* merge_at_join_blocks_;
+    int normal_merge_at_join_block_count_;
+    int deopt_merge_at_join_block_count_;
   };
 
   class LoopBuilder V8_FINAL {
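
One detail of the new bookkeeping worth spelling out: merge_at_join_blocks_
is a zone-allocated singly linked list that grows at the head, so after a
completed if/else the head record is the else end and head->next_ is the
then end -- exactly the order Finish(then_continuation, else_continuation)
unpacks. A minimal sketch of that push-front ordering:

    #include <cstdio>

    // Stand-in for MergeAtJoinBlock: push-front linking puts the most
    // recently recorded clause (else) at the head.
    struct Record {
      const char* label;
      Record* next;
    };

    int main() {
      Record then_rec = { "then", nullptr };    // recorded first
      Record else_rec = { "else", &then_rec };  // pushed in front
      Record* head = &else_rec;
      printf("%s, then %s\n", head->label, head->next->label);  // else, then
      return 0;
    }
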
@@ -1583,7 +1613,7 @@
 
     JSArrayBuilder(HGraphBuilder* builder,
                    ElementsKind kind,
-                   HValue* constructor_function);
+                   HValue* constructor_function = NULL);
 
     enum FillMode {
       DONT_FILL_WITH_HOLE,
@@ -1596,6 +1626,7 @@
     HValue* AllocateArray(HValue* capacity, HValue* length_field,
                           FillMode fill_mode = FILL_WITH_HOLE);
     HValue* GetElementsLocation() { return elements_location_; }
+    HValue* EmitMapCode();
 
    private:
     Zone* zone() const { return builder_->zone(); }
@@ -1609,7 +1640,6 @@
       return JSArray::kPreallocatedArrayElements;
     }
 
-    HValue* EmitMapCode();
     HValue* EmitInternalMapCode();
     HValue* EstablishEmptyArrayAllocationSize();
     HValue* EstablishAllocationSize(HValue* length_node);
@@ -1674,6 +1704,8 @@
                                  ElementsKind kind,
                                  int length);
 
+  HValue* BuildElementIndexHash(HValue* index);
+
   void BuildCompareNil(
       HValue* value,
       Handle<Type> type,
@@ -1700,6 +1732,13 @@
  private:
   HGraphBuilder();
 
+  HValue* BuildUncheckedDictionaryElementLoadHelper(
+      HValue* elements,
+      HValue* key,
+      HValue* hash,
+      HValue* mask,
+      int current_probe);
+
   void PadEnvironmentForContinuation(HBasicBlock* from,
                                      HBasicBlock* continuation);
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index a1aa022..f06cc5b 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -118,6 +118,17 @@
 }
 
 
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { edx, ecx };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
 void LoadFieldStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -138,6 +149,19 @@
 }
 
 
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { ecx };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -5659,6 +5683,24 @@
 }
 
 
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+  __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+  __ mov(edi, eax);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ mov(eax, MemOperand(ebp, parameter_count_offset));
+  // The parameter count above includes the receiver for the arguments
+  // passed to the deoptimization handler. Subtract the receiver to get
+  // the parameter count for the call.
+  __ sub(eax, Immediate(1));
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  ParameterCount argument_count(eax);
+  __ InvokeFunction(
+      edi, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     // It's always safe to call the entry hook stub, as the hook itself
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index de52fad..fd70b77 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -781,17 +781,36 @@
 }
 
 
+static int ArgumentsOffsetWithoutFrame(int index) {
+  ASSERT(index < 0);
+  return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
 Operand LCodeGen::ToOperand(LOperand* op) const {
   if (op->IsRegister()) return Operand(ToRegister(op));
   if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
   ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
-  return Operand(ebp, StackSlotOffset(op->index()));
+  if (NeedsEagerFrame()) {
+    return Operand(ebp, StackSlotOffset(op->index()));
+  } else {
+    // With no eager frame, retrieve the parameter relative to the
+    // stack pointer.
+    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
 }
 
 
 Operand LCodeGen::HighOperand(LOperand* op) {
   ASSERT(op->IsDoubleStackSlot());
-  return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+  if (NeedsEagerFrame()) {
+    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // With no eager frame, retrieve the parameter relative to the
+    // stack pointer.
+    return Operand(
+        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
 }
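
The frameless path is new: when no eager frame was built, esp still points
at the return address, so the negative parameter indices translate into
small positive esp offsets. A worked sketch of the ia32 arithmetic, with
both constants assumed to be 4 bytes (the MIPS version later in this patch
omits kPCOnStackSize because the return address lives in a register there):

    #include <cassert>
    #include <cstdio>

    const int kPointerSize = 4;    // assumed ia32 values
    const int kPCOnStackSize = 4;

    // Mirrors ArgumentsOffsetWithoutFrame: index -1 is the slot just
    // above the return address.
    static int ArgumentsOffsetWithoutFrame(int index) {
      assert(index < 0);
      return -(index + 1) * kPointerSize + kPCOnStackSize;
    }

    int main() {
      printf("index -1 -> [esp + %d]\n", ArgumentsOffsetWithoutFrame(-1));  // 4
      printf("index -2 -> [esp + %d]\n", ArgumentsOffsetWithoutFrame(-2));  // 8
      return 0;
    }
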
 
 
@@ -4376,7 +4395,12 @@
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ leave();
+    __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+  } else {
+    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  }
 }
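
The tail-call branch is the key change here: instead of CallCode, which
records a safepoint and returns to this code, the generated code tears down
its own frame (leave) when one exists and jumps straight into the stub, so
the stub returns to this function's caller. It is the same transformation
an optimizing C++ compiler applies to a call in tail position:

    #include <cstdio>

    static int Callee(int x) { return x + 1; }

    // In tail position the caller's frame can be dropped and the call
    // becomes a jump -- the leave-then-jmp pattern emitted above.
    static int Caller(int x) { return Callee(x); }

    int main() {
      printf("%d\n", Caller(41));  // 42
      return 0;
    }
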
 
 
@@ -6045,19 +6069,22 @@
     __ push(Immediate(Smi::FromInt(size)));
   }
 
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(
-        Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
+    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(
-        Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
+    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
-    CallRuntimeFromDeferred(
-        Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   }
+  __ push(Immediate(Smi::FromInt(flags)));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, eax);
 }
 
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 29b42fe..278d600 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1410,8 +1410,10 @@
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
-  LCallFunction* result = new(zone()) LCallFunction(context, function);
-  return MarkAsCall(DefineFixed(result, eax), instr);
+  LCallFunction* call = new(zone()) LCallFunction(context, function);
+  LInstruction* result = DefineFixed(call, eax);
+  if (instr->IsTailCall()) return result;
+  return MarkAsCall(result, instr);
 }
 
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 1bdd382..a46f8d9 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1412,8 +1412,9 @@
 }
 
 
-// Compute the hash code from the untagged key.  This must be kept in sync
-// with ComputeIntegerHash in utils.h.
+// Compute the hash code from the untagged key.  This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
 //
 // Note: r0 will contain hash code
 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
@@ -1489,8 +1490,7 @@
   dec(r1);
 
   // Generate an unrolled loop that performs a few probes before giving up.
-  const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
+  for (int i = 0; i < kNumberDictionaryProbes; i++) {
     // Use r2 for index calculations and keep the hash intact in r0.
     mov(r2, r0);
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -1508,7 +1508,7 @@
                           r2,
                           times_pointer_size,
                           SeededNumberDictionary::kElementsStartOffset));
-    if (i != (kProbes - 1)) {
+    if (i != (kNumberDictionaryProbes - 1)) {
       j(equal, &done);
     } else {
       j(not_equal, miss);
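
Both unrolled assembly loops and the hydrogen helper added earlier probe the
number dictionary identically: kNumberDictionaryProbes slots along a
quadratic sequence, then the miss (assembly) or deopt (hydrogen) path. A
sketch of the probe walk; the triangular-step formula for GetProbeOffset is
an assumption about the hash-table helper:

    #include <cstdint>
    #include <cstdio>

    const int kNumberDictionaryProbes = 4;  // matches the loops above

    // Assumed probe step: the triangular numbers n(n+1)/2.
    static uint32_t GetProbeOffset(uint32_t n) { return (n + n * n) >> 1; }

    int main() {
      uint32_t hash = 0x2b992ddfu;
      uint32_t mask = 16 - 1;  // power-of-two capacity 16
      for (int i = 0; i < kNumberDictionaryProbes; i++) {
        printf("probe %d -> slot %u\n", i, (hash + GetProbeOffset(i)) & mask);
      }
      return 0;
    }
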
diff --git a/src/ic.cc b/src/ic.cc
index 5cc54d2..2f7db26 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -804,16 +804,34 @@
   if (use_ic && state() != MEGAMORPHIC) {
     ASSERT(!object->IsJSGlobalProxy());
     int argc = target()->arguments_count();
-    Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic(
-        argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
-    if (object->IsJSObject()) {
-      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-      if (receiver->elements()->map() ==
-          isolate()->heap()->non_strict_arguments_elements_map()) {
-        stub = isolate()->stub_cache()->ComputeCallArguments(argc);
+    Handle<Code> stub;
+
+    // Use the KeyedArrayCallStub if the call is of the form array[smi](...),
+    // where array is an instance of one of the initial array maps (without
+    // extra named properties).
+    // TODO(verwaest): Also support keyed calls on instances of other maps.
+    if (object->IsJSArray() && key->IsSmi()) {
+      Handle<JSArray> array = Handle<JSArray>::cast(object);
+      ElementsKind kind = array->map()->elements_kind();
+      if (IsFastObjectElementsKind(kind) &&
+          array->map() == isolate()->get_initial_js_array_map(kind)) {
+        KeyedArrayCallStub stub_gen(IsHoleyElementsKind(kind), argc);
+        stub = stub_gen.GetCode(isolate());
       }
     }
-    ASSERT(!stub.is_null());
+
+    if (stub.is_null()) {
+      stub = isolate()->stub_cache()->ComputeCallMegamorphic(
+          argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
+      if (object->IsJSObject()) {
+        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+        if (receiver->elements()->map() ==
+            isolate()->heap()->non_strict_arguments_elements_map()) {
+          stub = isolate()->stub_cache()->ComputeCallArguments(argc);
+        }
+      }
+      ASSERT(!stub.is_null());
+    }
     set_target(*stub);
     TRACE_IC("CallIC", key);
   }
@@ -2149,6 +2167,28 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_MissFromStubFailure) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  KeyedCallIC ic(isolate);
+  Arguments* caller_args = reinterpret_cast<Arguments*>(args[0]);
+  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> receiver((*caller_args)[0], isolate);
+
+  ic.UpdateState(receiver, key);
+  MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
+  // Result could be a function or a failure.
+  JSFunction* raw_function = NULL;
+  if (!maybe_result->To(&raw_function)) return maybe_result;
+
+  if (raw_function->is_compiled()) return raw_function;
+
+  Handle<JSFunction> function(raw_function, isolate);
+  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  return *function;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
   SealHandleScope shs(isolate);
 
diff --git a/src/ic.h b/src/ic.h
index bffb290..cc63767 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -910,6 +910,7 @@
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_MissFromStubFailure);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
diff --git a/src/isolate.cc b/src/isolate.cc
index 740cce0..07bb2ae 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -2238,6 +2238,8 @@
   bootstrapper_->Initialize(create_heap_objects);
   builtins_.SetUp(this, create_heap_objects);
 
+  if (create_heap_objects) heap_.CreateStubsRequiringBuiltins();
+
   // Only preallocate on the first initialization.
   if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
     // Start the thread which will set aside some memory.
@@ -2314,6 +2316,7 @@
     CodeStub::GenerateFPStubs(this);
     StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
     StubFailureTrampolineStub::GenerateAheadOfTime(this);
+    StubFailureTailCallTrampolineStub::GenerateAheadOfTime(this);
     // TODO(mstarzinger): The following is an ugly hack to make sure the
     // interface descriptor is initialized even when stubs have been
     // deserialized out of the snapshot without the graph builder.
diff --git a/src/lithium.cc b/src/lithium.cc
index 966afa9..ee8ea3e 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -233,36 +233,6 @@
 }
 
 
-int ElementsKindToShiftSize(ElementsKind elements_kind) {
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      return 0;
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      return 1;
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-      return 2;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-      return 3;
-    case FAST_SMI_ELEMENTS:
-    case FAST_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      return kPointerSizeLog2;
-  }
-  UNREACHABLE();
-  return 0;
-}
-
-
 int StackSlotOffset(int index) {
   if (index >= 0) {
     // Local or spill slot. Skip the frame pointer, function, and
@@ -372,7 +342,8 @@
   // shift all parameter indexes down by the number of parameters, and
   // make sure they end up negative so they are distinguishable from
   // spill slots.
-  int result = index - info()->scope()->num_parameters() - 1;
+  int result = index - info()->num_parameters() - 1;
+
   ASSERT(result < 0);
   return result;
 }
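
The switch from scope()->num_parameters() to info()->num_parameters()
matters for compiled stubs, which have no scope; the shift itself is
unchanged. Concretely, it maps parameter indices into the negative range
reserved for them:

    #include <cassert>

    // Mirrors the shift above: parameter indices move below zero so they
    // cannot collide with spill slots, which are >= 0.
    static int ShiftParameterIndex(int index, int num_parameters) {
      int result = index - num_parameters - 1;
      assert(result < 0);
      return result;
    }

    int main() {
      assert(ShiftParameterIndex(0, 2) == -3);  // first of two parameters
      assert(ShiftParameterIndex(1, 2) == -2);  // second of two parameters
      return 0;
    }
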
diff --git a/src/lithium.h b/src/lithium.h
index 4f84087..d4395f2 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -791,7 +791,6 @@
 };
 
 
-int ElementsKindToShiftSize(ElementsKind elements_kind);
 int StackSlotOffset(int index);
 
 enum NumberUntagDMode {
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 77b5dee..e28eb7a 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -114,6 +114,17 @@
 }
 
 
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+    static Register registers[] = { a1, a0 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
 void LoadFieldStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -134,6 +145,19 @@
 }
 
 
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a2 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -5854,6 +5878,24 @@
 }
 
 
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+  __ mov(a1, v0);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ lw(a0, MemOperand(fp, parameter_count_offset));
+  // The parameter count above includes the receiver for the arguments
+  // passed to the deoptimization handler. Subtract the receiver to get
+  // the parameter count for the call.
+  __ Subu(a0, a0, 1);
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  ParameterCount argument_count(a0);
+  __ InvokeFunction(
+      a1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     AllowStubCallsScope allow_stub_calls(masm, true);
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 7f6e869..d71c055 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -488,17 +488,36 @@
 }
 
 
+static int ArgumentsOffsetWithoutFrame(int index) {
+  ASSERT(index < 0);
+  return -(index + 1) * kPointerSize;
+}
+
+
 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
   ASSERT(!op->IsRegister());
   ASSERT(!op->IsDoubleRegister());
   ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
-  return MemOperand(fp, StackSlotOffset(op->index()));
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // With no eager frame, retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
 }
 
 
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
   ASSERT(op->IsDoubleStackSlot());
-  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // With no eager frame, retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(
+        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
 }
 
 
@@ -4015,7 +4034,12 @@
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ mov(sp, fp);
+    __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+  } else {
+    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  }
 }
 
 
@@ -5411,19 +5435,22 @@
     __ Push(Smi::FromInt(size));
   }
 
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
-                            instr->context());
+    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
-                            instr->context());
+    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
-    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
-                            instr->context());
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(v0, result);
 }
 
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 03c64d9..1635b77 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1330,8 +1330,10 @@
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), a1);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LCallFunction(context, function), v0), instr);
+  LCallFunction* call = new(zone()) LCallFunction(context, function);
+  LInstruction* result = DefineFixed(call, v0);
+  if (instr->IsTailCall()) return result;
+  return MarkAsCall(result, instr);
 }
 
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 295e9c0..2fdfb67 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -510,8 +510,7 @@
   Subu(reg1, reg1, Operand(1));
 
   // Generate an unrolled loop that performs a few probes before giving up.
-  static const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
+  for (int i = 0; i < kNumberDictionaryProbes; i++) {
     // Use reg2 for index calculations and keep the hash intact in reg0.
     mov(reg2, reg0);
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -530,7 +529,7 @@
     addu(reg2, elements, at);
 
     lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
-    if (i != kProbes - 1) {
+    if (i != kNumberDictionaryProbes - 1) {
       Branch(&done, eq, key, Operand(at));
     } else {
       Branch(miss, ne, key, Operand(at));
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 9358f42..20a44c3 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3901,6 +3901,7 @@
          kind() == STORE_IC ||
          kind() == LOAD_IC ||
          kind() == KEYED_LOAD_IC ||
+         kind() == KEYED_CALL_IC ||
          kind() == TO_BOOLEAN_IC);
   return StubMajorKeyField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
@@ -3917,6 +3918,7 @@
          kind() == KEYED_LOAD_IC ||
          kind() == STORE_IC ||
          kind() == KEYED_STORE_IC ||
+         kind() == KEYED_CALL_IC ||
          kind() == TO_BOOLEAN_IC);
   ASSERT(0 <= major && major < 256);
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
diff --git a/src/runtime.cc b/src/runtime.cc
index 2cf033c..28506dd 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -917,6 +917,12 @@
   ASSERT(byte_length % element_size == 0);
   size_t length = byte_length / element_size;
 
+  if (length > static_cast<unsigned>(Smi::kMaxValue)) {
+    return isolate->Throw(*isolate->factory()->
+          NewRangeError("invalid_typed_array_length",
+            HandleVector<Object>(NULL, 0)));
+  }
+
   Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
   holder->set_length(*length_obj);
   holder->set_weak_next(buffer->weak_first_view());
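
Both new range checks compare the element count against Smi::kMaxValue,
since a longer typed array could not store its length as a smi. The limits
are target-dependent; stated concretely:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A smi spends one bit on the tag: 31 payload bits on 32-bit
      // targets, 32 on 64-bit targets.
      const int32_t kSmiMaxValue32 = (1 << 30) - 1;    // 1073741823
      const int64_t kSmiMaxValue64 = (1LL << 31) - 1;  // 2147483647
      uint64_t length = 1ULL << 31;
      printf("length %llu > 32-bit smi max %d: %s\n",
             (unsigned long long)length, kSmiMaxValue32,
             length > (uint64_t)kSmiMaxValue32 ? "throws" : "ok");
      return 0;
    }
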
@@ -956,9 +962,11 @@
 
   Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
   size_t length = NumberToSize(isolate, *length_obj);
-  if (length > (kMaxInt / element_size)) {
+
+  if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
+      (length > (kMaxInt / element_size))) {
     return isolate->Throw(*isolate->factory()->
-          NewRangeError("invalid_array_buffer_length",
+          NewRangeError("invalid_typed_array_length",
             HandleVector<Object>(NULL, 0)));
   }
   size_t byte_length = length * element_size;
@@ -9775,6 +9783,7 @@
 // Used as a fall-back for generated code when the space is full.
 static MaybeObject* Allocate(Isolate* isolate,
                              int size,
+                             bool double_align,
                              AllocationSpace space) {
   Heap* heap = isolate->heap();
   RUNTIME_ASSERT(IsAligned(size, kPointerSize));
@@ -9796,24 +9805,19 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
   SealHandleScope shs(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
-  return Allocate(isolate, size_smi->value(), NEW_SPACE);
+  CONVERT_SMI_ARG_CHECKED(size, 0);
+  return Allocate(isolate, size, false, NEW_SPACE);
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInTargetSpace) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
-  return Allocate(isolate, size_smi->value(), OLD_POINTER_SPACE);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldDataSpace) {
-  SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
-  return Allocate(isolate, size_smi->value(), OLD_DATA_SPACE);
+  ASSERT(args.length() == 2);
+  CONVERT_SMI_ARG_CHECKED(size, 0);
+  CONVERT_SMI_ARG_CHECKED(flags, 1);
+  bool double_align = AllocateDoubleAlignFlag::decode(flags);
+  AllocationSpace space = AllocateTargetSpace::decode(flags);
+  return Allocate(isolate, size, double_align, space);
 }
 
 
@@ -14813,6 +14817,11 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MaxSmi) {
+  return Smi::FromInt(Smi::kMaxValue);
+}
+
+
 // ----------------------------------------------------------------------------
 // Implementation of Runtime
 
diff --git a/src/runtime.h b/src/runtime.h
index 734875f..8b650e9 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -103,8 +103,7 @@
   F(CompileForOnStackReplacement, 2, 1) \
   F(SetAllocationTimeout, 2, 1) \
   F(AllocateInNewSpace, 1, 1) \
-  F(AllocateInOldPointerSpace, 1, 1) \
-  F(AllocateInOldDataSpace, 1, 1) \
+  F(AllocateInTargetSpace, 2, 1) \
   F(SetNativeFlag, 1, 1) \
   F(SetInlineBuiltinFlag, 1, 1) \
   F(StoreArrayLiteralElement, 5, 1) \
@@ -113,6 +112,7 @@
   F(FlattenString, 1, 1) \
   F(MigrateInstance, 1, 1) \
   F(NotifyContextDisposed, 0, 1) \
+  F(MaxSmi, 0, 1) \
   \
   /* Array join support */ \
   F(PushIfAbsent, 2, 1) \
@@ -844,6 +844,9 @@
 //---------------------------------------------------------------------------
 // Constants used by interface to runtime functions.
 
+class AllocateDoubleAlignFlag:    public BitField<bool,            0, 1> {};
+class AllocateTargetSpace:        public BitField<AllocationSpace, 1, 3> {};
+
 class DeclareGlobalsEvalFlag:     public BitField<bool,         0, 1> {};
 class DeclareGlobalsNativeFlag:   public BitField<bool,         1, 1> {};
 class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
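
AllocateDoubleAlignFlag and AllocateTargetSpace pack the alignment bit and a
3-bit space id into one smi-encoded flags word, which is how
Runtime_AllocateInTargetSpace receives both in a single argument. A minimal
re-implementation of the BitField round trip the code relies on (the enum
values here are illustrative):

    #include <cstdio>

    // Simplified version of V8's BitField<T, shift, size> template.
    template <class T, int shift, int size>
    struct BitField {
      static const int kMask = ((1 << size) - 1) << shift;
      static int encode(T value) { return static_cast<int>(value) << shift; }
      static int update(int prev, T value) {
        return (prev & ~kMask) | encode(value);
      }
      static T decode(int value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    enum AllocationSpace { NEW_SPACE = 0, OLD_POINTER_SPACE = 1 };
    typedef BitField<bool, 0, 1> AllocateDoubleAlignFlag;
    typedef BitField<AllocationSpace, 1, 3> AllocateTargetSpace;

    int main() {
      int flags = AllocateDoubleAlignFlag::encode(true);
      flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
      printf("align=%d space=%d\n",
             static_cast<int>(AllocateDoubleAlignFlag::decode(flags)),
             static_cast<int>(AllocateTargetSpace::decode(flags)));
      return 0;
    }
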
diff --git a/src/serialize.cc b/src/serialize.cc
index 26611e7..36e19c1 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -320,8 +320,6 @@
       7,
       "IncrementalMarking::RecordWrite");
 
-
-
   // Miscellaneous
   Add(ExternalReference::roots_array_start(isolate).address(),
       UNCLASSIFIED,
@@ -531,20 +529,20 @@
       UNCLASSIFIED,
       53,
       "Runtime::AllocateInNewSpace");
+  Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
+      UNCLASSIFIED,
+      54,
+      "Runtime::AllocateInTargetSpace");
   Add(ExternalReference::old_pointer_space_allocation_top_address(
       isolate).address(),
       UNCLASSIFIED,
-      54,
+      55,
       "Heap::OldPointerSpaceAllocationTopAddress");
   Add(ExternalReference::old_pointer_space_allocation_limit_address(
       isolate).address(),
       UNCLASSIFIED,
-      55,
-      "Heap::OldPointerSpaceAllocationLimitAddress");
-  Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(),
-      UNCLASSIFIED,
       56,
-      "Runtime::AllocateInOldPointerSpace");
+      "Heap::OldPointerSpaceAllocationLimitAddress");
   Add(ExternalReference::old_data_space_allocation_top_address(
       isolate).address(),
       UNCLASSIFIED,
@@ -555,26 +553,22 @@
       UNCLASSIFIED,
       58,
       "Heap::OldDataSpaceAllocationLimitAddress");
-  Add(ExternalReference(Runtime::kAllocateInOldDataSpace, isolate).address(),
-      UNCLASSIFIED,
-      59,
-      "Runtime::AllocateInOldDataSpace");
   Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
       address(),
       UNCLASSIFIED,
-      60,
+      59,
       "Heap::NewSpaceAllocationLimitAddress");
   Add(ExternalReference::allocation_sites_list_address(isolate).address(),
       UNCLASSIFIED,
-      61,
+      60,
       "Heap::allocation_sites_list_address()");
   Add(ExternalReference::address_of_uint32_bias().address(),
       UNCLASSIFIED,
-      62,
+      61,
       "uint32_bias");
   Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
       UNCLASSIFIED,
-      63,
+      62,
       "Code::MarkCodeAsExecuted");
 
   // Add a small set of deopt entry addresses to encoder without generating the
@@ -586,7 +580,7 @@
         entry,
         Deoptimizer::LAZY,
         Deoptimizer::CALCULATE_ENTRY_ADDRESS);
-    Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
+    Add(address, LAZY_DEOPTIMIZATION, entry, "lazy_deopt");
   }
 }
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 7a11c23..af1f3a1 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1480,8 +1480,9 @@
         elements_kind).GetCode(isolate());
     __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
   } else {
-    Handle<Code> stub =
-        KeyedLoadDictionaryElementStub().GetCode(isolate());
+    Handle<Code> stub = FLAG_compiled_keyed_dictionary_loads
+        ? KeyedLoadDictionaryElementStub().GetCode(isolate())
+        : KeyedLoadDictionaryElementPlatformStub().GetCode(isolate());
     __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
   }
 
diff --git a/src/type-info.cc b/src/type-info.cc
index c548ad3..36a2dd1 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -200,7 +200,15 @@
 bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
   Handle<Object> value = GetInfo(expr->CallFeedbackId());
   return value->IsMap() || value->IsAllocationSite() || value->IsJSFunction() ||
-      value->IsSmi();
+      value->IsSmi() ||
+      (value->IsCode() && Handle<Code>::cast(value)->ic_state() == MONOMORPHIC);
+}
+
+
+bool TypeFeedbackOracle::KeyedArrayCallIsHoley(Call* expr) {
+  Handle<Object> value = GetInfo(expr->CallFeedbackId());
+  Handle<Code> code = Handle<Code>::cast(value);
+  return KeyedArrayCallStub::IsHoley(code);
 }
 
 
@@ -617,7 +625,6 @@
       case Code::LOAD_IC:
       case Code::STORE_IC:
       case Code::CALL_IC:
-      case Code::KEYED_CALL_IC:
         if (target->ic_state() == MONOMORPHIC) {
           if (target->kind() == Code::CALL_IC &&
               target->check_type() != RECEIVER_MAP_CHECK) {
@@ -637,6 +644,7 @@
         }
         break;
 
+      case Code::KEYED_CALL_IC:
       case Code::KEYED_LOAD_IC:
       case Code::KEYED_STORE_IC:
       case Code::BINARY_OP_IC:
diff --git a/src/type-info.h b/src/type-info.h
index f295c06..a116971 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -250,6 +250,7 @@
   bool StoreIsPreMonomorphic(TypeFeedbackId ast_id);
   bool StoreIsKeyedPolymorphic(TypeFeedbackId ast_id);
   bool CallIsMonomorphic(Call* expr);
+  bool KeyedArrayCallIsHoley(Call* expr);
   bool CallNewIsMonomorphic(CallNew* expr);
   bool ObjectLiteralStoreIsMonomorphic(ObjectLiteralProperty* prop);
 
diff --git a/src/typedarray.js b/src/typedarray.js
index 422dc4a..ca87f8b 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -87,6 +87,9 @@
   function NAMEConstructByLength(obj, length) {
     var l = IS_UNDEFINED(length) ?
       0 : ToPositiveInteger(length, "invalid_typed_array_length");
+    if (l > %MaxSmi()) {
+      throw MakeRangeError("invalid_typed_array_length");
+    }
     var byteLength = l * ELEMENT_SIZE;
     var buffer = new $ArrayBuffer(byteLength);
     %TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
diff --git a/src/types.cc b/src/types.cc
index 17a19b2..9c576c0 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -70,23 +70,23 @@
 }
 
 template<>
-Handle<Map> Type::Iterator<Map>::Current() {
+Handle<i::Map> Type::Iterator<i::Map>::Current() {
   return get_type()->as_class();
 }
 
 template<>
-Handle<v8::internal::Object> Type::Iterator<v8::internal::Object>::Current() {
+Handle<i::Object> Type::Iterator<i::Object>::Current() {
   return get_type()->as_constant();
 }
 
 
 template<>
-bool Type::Iterator<Map>::matches(Handle<Type> type) {
+bool Type::Iterator<i::Map>::matches(Handle<Type> type) {
   return type->is_class();
 }
 
 template<>
-bool Type::Iterator<v8::internal::Object>::matches(Handle<Type> type) {
+bool Type::Iterator<i::Object>::matches(Handle<Type> type) {
   return type->is_constant();
 }
 
@@ -105,8 +105,8 @@
   index_ = -1;
 }
 
-template class Type::Iterator<Map>;
-template class Type::Iterator<v8::internal::Object>;
+template class Type::Iterator<i::Map>;
+template class Type::Iterator<i::Object>;
 
 
 // Get the smallest bitset subsuming this type.
@@ -120,106 +120,112 @@
       bitset |= union_get(unioned, i)->LubBitset();
     }
     return bitset;
+  } else if (this->is_class()) {
+    return LubBitset(*this->as_class());
   } else {
-    Map* map = NULL;
-    if (this->is_class()) {
-      map = *this->as_class();
-    } else {
-      Handle<v8::internal::Object> value = this->as_constant();
-      if (value->IsSmi()) return kSmi;
-      map = HeapObject::cast(*value)->map();
-      if (map->instance_type() == HEAP_NUMBER_TYPE) {
-        int32_t i;
-        uint32_t u;
-        if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
-        if (value->ToUint32(&u)) return kUnsigned32;
-        return kDouble;
-      }
-      if (map->instance_type() == ODDBALL_TYPE) {
-        if (value->IsUndefined()) return kUndefined;
-        if (value->IsNull()) return kNull;
-        if (value->IsTrue() || value->IsFalse()) return kBoolean;
-        if (value->IsTheHole()) return kAny;  // TODO(rossberg): kNone?
-        UNREACHABLE();
-      }
-    }
-    switch (map->instance_type()) {
-      case STRING_TYPE:
-      case ASCII_STRING_TYPE:
-      case CONS_STRING_TYPE:
-      case CONS_ASCII_STRING_TYPE:
-      case SLICED_STRING_TYPE:
-      case SLICED_ASCII_STRING_TYPE:
-      case EXTERNAL_STRING_TYPE:
-      case EXTERNAL_ASCII_STRING_TYPE:
-      case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
-      case SHORT_EXTERNAL_STRING_TYPE:
-      case SHORT_EXTERNAL_ASCII_STRING_TYPE:
-      case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
-      case INTERNALIZED_STRING_TYPE:
-      case ASCII_INTERNALIZED_STRING_TYPE:
-      case CONS_INTERNALIZED_STRING_TYPE:
-      case CONS_ASCII_INTERNALIZED_STRING_TYPE:
-      case EXTERNAL_INTERNALIZED_STRING_TYPE:
-      case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
-      case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
-      case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
-      case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
-      case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
-        return kString;
-      case SYMBOL_TYPE:
-        return kSymbol;
-      case ODDBALL_TYPE:
-        return kOddball;
-      case HEAP_NUMBER_TYPE:
-        return kDouble;
-      case JS_VALUE_TYPE:
-      case JS_DATE_TYPE:
-      case JS_OBJECT_TYPE:
-      case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
-      case JS_GENERATOR_OBJECT_TYPE:
-      case JS_MODULE_TYPE:
-      case JS_GLOBAL_OBJECT_TYPE:
-      case JS_BUILTINS_OBJECT_TYPE:
-      case JS_GLOBAL_PROXY_TYPE:
-      case JS_ARRAY_BUFFER_TYPE:
-      case JS_TYPED_ARRAY_TYPE:
-      case JS_DATA_VIEW_TYPE:
-      case JS_SET_TYPE:
-      case JS_MAP_TYPE:
-      case JS_WEAK_MAP_TYPE:
-      case JS_WEAK_SET_TYPE:
-        if (map->is_undetectable()) return kUndetectable;
-        return kOtherObject;
-      case JS_ARRAY_TYPE:
-        return kArray;
-      case JS_FUNCTION_TYPE:
-        return kFunction;
-      case JS_REGEXP_TYPE:
-        return kRegExp;
-      case JS_PROXY_TYPE:
-      case JS_FUNCTION_PROXY_TYPE:
-        return kProxy;
-      case MAP_TYPE:
-        // When compiling stub templates, the meta map is used as a place holder
-        // for the actual map with which the template is later instantiated.
-        // We treat it as a kind of type variable whose upper bound is Any.
-        // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
-        // we must exclude Undetectable here. This makes no sense, really,
-        // because it means that the template isn't actually parametric.
-        // Also, it doesn't apply elsewhere. 8-(
-        // We ought to find a cleaner solution for compiling stubs parameterised
-        // over type or class variables, esp ones with bounds...
-        return kDetectable;
-      case DECLARED_ACCESSOR_INFO_TYPE:
-      case EXECUTABLE_ACCESSOR_INFO_TYPE:
-      case ACCESSOR_PAIR_TYPE:
-      case FIXED_ARRAY_TYPE:
-        return kInternal;
-      default:
-        UNREACHABLE();
-        return kNone;
-    }
+    return LubBitset(*this->as_constant());
+  }
+}
+
+
+int Type::LubBitset(i::Object* value) {
+  if (value->IsSmi()) return kSmi;
+  i::Map* map = i::HeapObject::cast(value)->map();
+  if (map->instance_type() == HEAP_NUMBER_TYPE) {
+    int32_t i;
+    uint32_t u;
+    if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
+    if (value->ToUint32(&u)) return kUnsigned32;
+    return kDouble;
+  }
+  if (map->instance_type() == ODDBALL_TYPE) {
+    if (value->IsUndefined()) return kUndefined;
+    if (value->IsNull()) return kNull;
+    if (value->IsBoolean()) return kBoolean;
+    if (value->IsTheHole()) return kAny;  // TODO(rossberg): kNone?
+    UNREACHABLE();
+  }
+  return Type::LubBitset(map);
+}
+
+
+int Type::LubBitset(i::Map* map) {
+  switch (map->instance_type()) {
+    case STRING_TYPE:
+    case ASCII_STRING_TYPE:
+    case CONS_STRING_TYPE:
+    case CONS_ASCII_STRING_TYPE:
+    case SLICED_STRING_TYPE:
+    case SLICED_ASCII_STRING_TYPE:
+    case EXTERNAL_STRING_TYPE:
+    case EXTERNAL_ASCII_STRING_TYPE:
+    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+    case SHORT_EXTERNAL_STRING_TYPE:
+    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+    case INTERNALIZED_STRING_TYPE:
+    case ASCII_INTERNALIZED_STRING_TYPE:
+    case CONS_INTERNALIZED_STRING_TYPE:
+    case CONS_ASCII_INTERNALIZED_STRING_TYPE:
+    case EXTERNAL_INTERNALIZED_STRING_TYPE:
+    case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+    case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+    case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+    case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+    case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+      return kString;
+    case SYMBOL_TYPE:
+      return kSymbol;
+    case ODDBALL_TYPE:
+      return kOddball;
+    case HEAP_NUMBER_TYPE:
+      return kDouble;
+    case JS_VALUE_TYPE:
+    case JS_DATE_TYPE:
+    case JS_OBJECT_TYPE:
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+    case JS_GENERATOR_OBJECT_TYPE:
+    case JS_MODULE_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
+    case JS_BUILTINS_OBJECT_TYPE:
+    case JS_GLOBAL_PROXY_TYPE:
+    case JS_ARRAY_BUFFER_TYPE:
+    case JS_TYPED_ARRAY_TYPE:
+    case JS_DATA_VIEW_TYPE:
+    case JS_SET_TYPE:
+    case JS_MAP_TYPE:
+    case JS_WEAK_MAP_TYPE:
+    case JS_WEAK_SET_TYPE:
+      if (map->is_undetectable()) return kUndetectable;
+      return kOtherObject;
+    case JS_ARRAY_TYPE:
+      return kArray;
+    case JS_FUNCTION_TYPE:
+      return kFunction;
+    case JS_REGEXP_TYPE:
+      return kRegExp;
+    case JS_PROXY_TYPE:
+    case JS_FUNCTION_PROXY_TYPE:
+      return kProxy;
+    case MAP_TYPE:
+      // When compiling stub templates, the meta map is used as a place holder
+      // for the actual map with which the template is later instantiated.
+      // We treat it as a kind of type variable whose upper bound is Any.
+      // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
+      // we must exclude Undetectable here. This makes no sense, really,
+      // because it means that the template isn't actually parametric.
+      // Also, it doesn't apply elsewhere. 8-(
+      // We ought to find a cleaner solution for compiling stubs parameterised
+      // over type or class variables, esp ones with bounds...
+      return kDetectable;
+    case DECLARED_ACCESSOR_INFO_TYPE:
+    case EXECUTABLE_ACCESSOR_INFO_TYPE:
+    case ACCESSOR_PAIR_TYPE:
+    case FIXED_ARRAY_TYPE:
+      return kInternal;
+    default:
+      UNREACHABLE();
+      return kNone;
   }
 }
 
@@ -237,6 +243,18 @@
 }
 
 
+// Most precise _current_ type of a value (usually its class).
+Type* Type::CurrentOf(Handle<i::Object> value) {
+  if (value->IsSmi()) return Smi();
+  i::Map* map = i::HeapObject::cast(*value)->map();
+  if (map->instance_type() == HEAP_NUMBER_TYPE ||
+      map->instance_type() == ODDBALL_TYPE) {
+    return Type::Of(value);
+  }
+  return Class(i::handle(map));
+}
+
+
 // Check this <= that.
 bool Type::SlowIs(Type* that) {
   // Fast path for bitsets.
@@ -374,11 +392,11 @@
   Isolate* isolate = NULL;
   int size = type1->is_bitset() || type2->is_bitset() ? 1 : 0;
   if (!type1->is_bitset()) {
-    isolate = HeapObject::cast(*type1)->GetIsolate();
+    isolate = i::HeapObject::cast(*type1)->GetIsolate();
     size += (type1->is_union() ? type1->as_union()->length() : 1);
   }
   if (!type2->is_bitset()) {
-    isolate = HeapObject::cast(*type2)->GetIsolate();
+    isolate = i::HeapObject::cast(*type2)->GetIsolate();
     size += (type2->is_union() ? type2->as_union()->length() : 1);
   }
   ASSERT(isolate != NULL);
@@ -450,11 +468,11 @@
   Isolate* isolate = NULL;
   int size = 0;
   if (!type1->is_bitset()) {
-    isolate = HeapObject::cast(*type1)->GetIsolate();
+    isolate = i::HeapObject::cast(*type1)->GetIsolate();
     size = (type1->is_union() ? type1->as_union()->length() : 2);
   }
   if (!type2->is_bitset()) {
-    isolate = HeapObject::cast(*type2)->GetIsolate();
+    isolate = i::HeapObject::cast(*type2)->GetIsolate();
     int size2 = (type2->is_union() ? type2->as_union()->length() : 2);
     size = (size == 0 ? size2 : Min(size, size2));
   }
diff --git a/src/types.h b/src/types.h
index 5d437e2..2e21a9e 100644
--- a/src/types.h
+++ b/src/types.h
@@ -144,11 +144,11 @@
   TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
   #undef DEFINE_TYPE_CONSTRUCTOR
 
-  static Type* Class(Handle<Map> map) { return from_handle(map); }
-  static Type* Constant(Handle<HeapObject> value) {
+  static Type* Class(Handle<i::Map> map) { return from_handle(map); }
+  static Type* Constant(Handle<i::HeapObject> value) {
     return Constant(value, value->GetIsolate());
   }
-  static Type* Constant(Handle<v8::internal::Object> value, Isolate* isolate) {
+  static Type* Constant(Handle<i::Object> value, Isolate* isolate) {
     return from_handle(isolate->factory()->NewBox(value));
   }
 
@@ -156,6 +156,11 @@
   static Type* Intersect(Handle<Type> type1, Handle<Type> type2);
   static Type* Optional(Handle<Type> type);  // type \/ Undefined
 
+  static Type* Of(Handle<i::Object> value) {
+    return from_bitset(LubBitset(*value));
+  }
+  static Type* CurrentOf(Handle<i::Object> value);
+
   bool Is(Type* that) { return (this == that) ? true : SlowIs(that); }
   bool Is(Handle<Type> that) { return this->Is(*that); }
   bool Maybe(Type* that);
@@ -163,8 +168,8 @@
 
   bool IsClass() { return is_class(); }
   bool IsConstant() { return is_constant(); }
-  Handle<Map> AsClass() { return as_class(); }
-  Handle<v8::internal::Object> AsConstant() { return as_constant(); }
+  Handle<i::Map> AsClass() { return as_class(); }
+  Handle<i::Object> AsConstant() { return as_constant(); }
 
   int NumClasses();
   int NumConstants();
@@ -191,16 +196,16 @@
     int index_;
   };
 
-  Iterator<Map> Classes() {
-    if (this->is_bitset()) return Iterator<Map>();
-    return Iterator<Map>(this->handle());
+  Iterator<i::Map> Classes() {
+    if (this->is_bitset()) return Iterator<i::Map>();
+    return Iterator<i::Map>(this->handle());
   }
-  Iterator<v8::internal::Object> Constants() {
-    if (this->is_bitset()) return Iterator<v8::internal::Object>();
-    return Iterator<v8::internal::Object>(this->handle());
+  Iterator<i::Object> Constants() {
+    if (this->is_bitset()) return Iterator<i::Object>();
+    return Iterator<i::Object>(this->handle());
   }
 
-  static Type* cast(v8::internal::Object* object) {
+  static Type* cast(i::Object* object) {
     Type* t = static_cast<Type*>(object);
     ASSERT(t->is_bitset() || t->is_class() ||
            t->is_constant() || t->is_union());
@@ -235,24 +240,24 @@
   bool SlowIs(Type* that);
 
   int as_bitset() { return Smi::cast(this)->value(); }
-  Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
-  Handle<v8::internal::Object> as_constant() {
-    Handle<Box> box = Handle<Box>::cast(handle());
-    return v8::internal::handle(box->value(), box->GetIsolate());
+  Handle<i::Map> as_class() { return Handle<i::Map>::cast(handle()); }
+  Handle<i::Object> as_constant() {
+    Handle<i::Box> box = Handle<i::Box>::cast(handle());
+    return i::handle(box->value(), box->GetIsolate());
   }
   Handle<Unioned> as_union() { return Handle<Unioned>::cast(handle()); }
 
   Handle<Type> handle() { return handle_via_isolate_of(this); }
   Handle<Type> handle_via_isolate_of(Type* type) {
     ASSERT(type->IsHeapObject());
-    return v8::internal::handle(this, HeapObject::cast(type)->GetIsolate());
+    return i::handle(this, i::HeapObject::cast(type)->GetIsolate());
   }
 
   static Type* from_bitset(int bitset) {
-    return static_cast<Type*>(Object::cast(Smi::FromInt(bitset)));
+    return static_cast<Type*>(i::Object::cast(i::Smi::FromInt(bitset)));
   }
-  static Type* from_handle(Handle<HeapObject> handle) {
-    return static_cast<Type*>(Object::cast(*handle));
+  static Type* from_handle(Handle<i::HeapObject> handle) {
+    return static_cast<Type*>(i::Object::cast(*handle));
   }
 
   static Handle<Type> union_get(Handle<Unioned> unioned, int i) {
@@ -263,6 +268,10 @@
 
   int LubBitset();  // least upper bound that's a bitset
   int GlbBitset();  // greatest lower bound that's a bitset
+
+  static int LubBitset(i::Object* value);
+  static int LubBitset(i::Map* map);
+
   bool InUnion(Handle<Unioned> unioned, int current_size);
   int ExtendUnion(Handle<Unioned> unioned, int current_size);
   int ExtendIntersection(
diff --git a/src/typing.cc b/src/typing.cc
index 03c1ad1..84f5968 100644
--- a/src/typing.cc
+++ b/src/typing.cc
@@ -450,8 +450,7 @@
   Expression* callee = expr->expression();
   Property* prop = callee->AsProperty();
   if (prop != NULL) {
-    if (prop->key()->IsPropertyName())
-      expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
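+    // Record method-call feedback for keyed property calls as well, not
+    // just for named ones.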
+    expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
   } else {
     expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
   }
diff --git a/src/version.cc b/src/version.cc
index 5b8b624..daa5e63 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     23
-#define BUILD_NUMBER      5
+#define BUILD_NUMBER      6
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index a3fb077..8712e27 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -114,6 +114,17 @@
 }
 
 
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { rdx, rax };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+    FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
 void LoadFieldStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -134,6 +145,19 @@
 }
 
 
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { rcx };
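+  // rcx holds the key; PASS_ARGUMENTS forwards the call's arguments to the
+  // miss handler, and TAIL_CALL_CONTINUATION resumes via a tail call.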
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -5452,6 +5476,24 @@
 }
 
 
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
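+  // The miss handler returns the target function in rax; tear down the stub
+  // failure frame and tail-call it with the original arguments.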
+  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+  __ movq(rdi, rax);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ movq(rax, MemOperand(rbp, parameter_count_offset));
+  // The parameter count above includes the receiver for the arguments passed
+  // to the deoptimization handler. Subtract one (the receiver) to get the
+  // parameter count for the call.
+  __ subl(rax, Immediate(1));
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  ParameterCount argument_count(rax);
+  __ InvokeFunction(
+      rdi, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     // It's always safe to call the entry hook stub, as the hook itself
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index dc6ddf9..c45f91e 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -415,11 +415,23 @@
 }
 
 
+static int ArgumentsOffsetWithoutFrame(int index) {
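+  // Without an eager frame, incoming arguments are addressed relative to rsp;
+  // they sit just above the return address pushed by the caller.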
+  ASSERT(index < 0);
+  return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
 Operand LCodeGen::ToOperand(LOperand* op) const {
   // Does not handle registers. In X64 assembler, plain registers are not
   // representable as an Operand.
   ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
-  return Operand(rbp, StackSlotOffset(op->index()));
+  if (NeedsEagerFrame()) {
+    return Operand(rbp, StackSlotOffset(op->index()));
+  } else {
+    // Retrieve the parameter relative to the stack pointer, since no eager
+    // frame has been built.
+    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
 }
 
 
@@ -3910,7 +3922,12 @@
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ leave();
+    __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+  } else {
+    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  }
 }
 
 
@@ -5231,19 +5248,21 @@
     __ Push(Smi::FromInt(size));
   }
 
+  int flags = 0;
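+  // Encode the target space in the flags so that a single runtime entry,
+  // Runtime::kAllocateInTargetSpace, can serve all three spaces.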
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(
-        Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
+    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(
-        Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
+    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
-    CallRuntimeFromDeferred(
-        Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, rax);
 }
 
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index a121646..c4cb642 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1323,8 +1323,10 @@
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* function = UseFixed(instr->function(), rdi);
-  LCallFunction* result = new(zone()) LCallFunction(context, function);
-  return MarkAsCall(DefineFixed(result, rax), instr);
+  LCallFunction* call = new(zone()) LCallFunction(context, function);
+  LInstruction* result = DefineFixed(call, rax);
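+  // A tail call never returns here, so the safepoint and deoptimization
+  // bookkeeping done by MarkAsCall is not needed.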
+  if (instr->IsTailCall()) return result;
+  return MarkAsCall(result, instr);
 }
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index c36f6a6..fd857d3 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -3906,6 +3906,9 @@
 }
 
 
+// Compute the hash code from the untagged key.  This must be kept in sync
+// with ComputeIntegerHash in utils.h and with KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
   // First of all we assign the hash seed to scratch.
   LoadRoot(scratch, Heap::kHashSeedRootIndex);
@@ -3980,8 +3983,7 @@
   decl(r1);
 
   // Generate an unrolled loop that performs a few probes before giving up.
-  const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
+  for (int i = 0; i < kNumberDictionaryProbes; i++) {
     // Use r2 for index calculations and keep the hash intact in r0.
     movq(r2, r0);
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -3999,7 +4001,7 @@
                            r2,
                            times_pointer_size,
                            SeededNumberDictionary::kElementsStartOffset));
-    if (i != (kProbes - 1)) {
+    if (i != (kNumberDictionaryProbes - 1)) {
       j(equal, &done);
     } else {
       j(not_equal, miss);
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index e5d2c6b..23fe1c2 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -2230,3 +2230,84 @@
   CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
   heap_profiler->StopRecordingHeapAllocations();
 }
+
+
+static const char* inline_heap_allocation_source =
+"function f_0(x) {\n"
+"  return f_1(x+1);\n"
+"}\n"
+"%NeverOptimizeFunction(f_0);\n"
+"function f_1(x) {\n"
+"  return new f_2(x+1);\n"
+"}\n"
+"function f_2(x) {\n"
+"  this.foo = x;\n"
+"}\n"
+"var instances = [];\n"
+"function start() {\n"
+"  instances.push(f_0(0));\n"
+"}\n"
+"\n"
+"for (var i = 0; i < 100; i++) start();\n";
+
+
+TEST(TrackBumpPointerAllocations) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope scope(v8::Isolate::GetCurrent());
+  LocalContext env;
+
+  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+  const char* names[] = { "(anonymous function)", "start", "f_0", "f_1" };
+  // First check that normally all allocations are recorded.
+  {
+    heap_profiler->StartRecordingHeapAllocations();
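+    // Recording disables inline (bump-pointer) allocation, so every
+    // allocation goes through the runtime and is visible to the tracker.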
+
+    CompileRun(inline_heap_allocation_source);
+
+    const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot(
+        v8::String::New("Test2"));
+    i::HeapSnapshotsCollection* collection = ToInternal(snapshot)->collection();
+    AllocationTracker* tracker = collection->allocation_tracker();
+    CHECK_NE(NULL, tracker);
+    // Resolve all function locations.
+    tracker->PrepareForSerialization();
+    // Print for better diagnostics in case of failure.
+    tracker->trace_tree()->Print(tracker);
+
+    AllocationTraceNode* node =
+        FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+    CHECK_NE(NULL, node);
+    CHECK_GE(node->allocation_count(), 100);
+    CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
+    heap_profiler->StopRecordingHeapAllocations();
+  }
+
+  {
+    heap_profiler->StartRecordingHeapAllocations();
+
+    // Now check that not all allocations are tracked when we manually
+    // re-enable inline allocations.
+    CHECK(CcTest::heap()->inline_allocation_disabled());
+    CcTest::heap()->EnableInlineAllocation();
+
+    CompileRun(inline_heap_allocation_source);
+
+    const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot(
+        v8::String::New("Test1"));
+    i::HeapSnapshotsCollection* collection = ToInternal(snapshot)->collection();
+    AllocationTracker* tracker = collection->allocation_tracker();
+    CHECK_NE(NULL, tracker);
+    // Resolve all function locations.
+    tracker->PrepareForSerialization();
+    // Print for better diagnostics in case of failure.
+    tracker->trace_tree()->Print(tracker);
+
+    AllocationTraceNode* node =
+        FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+    CHECK_NE(NULL, node);
+    CHECK_LT(node->allocation_count(), 100);
+
+    CcTest::heap()->DisableInlineAllocation();
+    heap_profiler->StopRecordingHeapAllocations();
+  }
+}
diff --git a/test/mjsunit/allocation-folding.js b/test/mjsunit/allocation-folding.js
index ec07392..a914b59 100644
--- a/test/mjsunit/allocation-folding.js
+++ b/test/mjsunit/allocation-folding.js
@@ -100,3 +100,17 @@
 
 assertEquals(result[1], 4);
 assertEquals(result2[1], 6);
+
+// Test allocation folding when the folded allocation exceeds the
+// Heap::MaxRegularSpaceAllocationSize limit but stays below the
+// Page::kMaxNonCodeHeapObjectSize limit.
+
+function boom() {
+  var a1 = new Array(84632);
+  var a2 = new Array(84632);
+  var a3 = new Array(84632);
+  return [ a1, a2, a3 ];
+}
+
+boom(); boom(); boom();
+%OptimizeFunctionOnNextCall(boom);
+boom();
diff --git a/test/mjsunit/keyed-array-call.js b/test/mjsunit/keyed-array-call.js
new file mode 100644
index 0000000..b97da3c
--- /dev/null
+++ b/test/mjsunit/keyed-array-call.js
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
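+// Calling array elements as functions (a[i](...)) exercises the new keyed
+// array call stub, both on its fast path and after a miss.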
+var a = [function(a) { return a+10; },
+         function(a) { return a+20; }];
+a.__proto__.test = function(a) { return a+30; }
+function f(i) {
+  return "r" + (1, a[i](i+1), a[i](i+2));
+}
+
+assertEquals("r12", f(0));
+assertEquals("r12", f(0));
+assertEquals("r23", f(1));
+assertEquals("r23", f(1));
+
+// Deopt the stub.
+assertEquals("rtest230", f("test"));
+
+var a2 = [function(a) { return a+10; },,
+          function(a) { return a+20; }];
+a2.__proto__.test = function(a) { return a+30; }
+function f2(i) {
+  return "r" + (1, a2[i](i+1), a2[i](i+2));
+}
+
+assertEquals("r12", f2(0));
+assertEquals("r12", f2(0));
+assertEquals("r24", f2(2));
+assertEquals("r24", f2(2));
+
+// Deopt the stub. This will throw because undefined is not a function.
+assertThrows(function() { f2(1) });
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index d975f2b..5134313 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -99,6 +99,10 @@
   ##############################################################################
   # Long running test that reproduces memory leak and should be run manually.
   'regress/regress-2073': [SKIP],
+
+  ##############################################################################
+  # Needs to allocate 1GB of memory.
+  'regress/regress-319722-ArrayBuffer': [PASS, ['system == windows or system == macos or arch == arm or arch == android_arm or arch == android_ia32', SKIP]],
 }],  # ALWAYS
 
 ##############################################################################
diff --git a/test/mjsunit/regress/clear-keyed-call.js b/test/mjsunit/regress/clear-keyed-call.js
new file mode 100644
index 0000000..6870f60
--- /dev/null
+++ b/test/mjsunit/regress/clear-keyed-call.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
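+// After the context is notified as disposed, GC must be able to clear the
+// keyed call IC without crashing.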
+function f(a) {
+  a[0](1);
+}
+
+f([function(a) { return a; }]);
+f([function(a) { return a; }]);
+f([function(a) { return a; }]);
+%NotifyContextDisposed();
+gc();
+gc();
+gc();
diff --git a/test/mjsunit/regress/regress-319722-ArrayBuffer.js b/test/mjsunit/regress/regress-319722-ArrayBuffer.js
new file mode 100644
index 0000000..c8aed9e
--- /dev/null
+++ b/test/mjsunit/regress/regress-319722-ArrayBuffer.js
@@ -0,0 +1,47 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --nostress-opt --allow-natives-syntax
+var maxSize = %MaxSmi() + 1;
+var ab = new ArrayBuffer(maxSize);
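+// Typed-array lengths are limited to MaxSmi, so a view of maxSize elements
+// over the buffer must throw a RangeError.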
+
+function TestArray(constr) {
+  assertThrows(function() {
+    new constr(ab, 0, maxSize);
+  }, RangeError);
+}
+
+TestArray(Uint8Array);
+TestArray(Int8Array);
+TestArray(Uint16Array);
+TestArray(Int16Array);
+TestArray(Uint32Array);
+TestArray(Int32Array);
+TestArray(Float32Array);
+TestArray(Float64Array);
+TestArray(Uint8ClampedArray);
+
diff --git a/test/mjsunit/regress/regress-319722-TypedArrays.js b/test/mjsunit/regress/regress-319722-TypedArrays.js
new file mode 100644
index 0000000..0445e2d
--- /dev/null
+++ b/test/mjsunit/regress/regress-319722-TypedArrays.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --nostress-opt --allow-natives-syntax
+var maxSize = %MaxSmi() + 1;
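+// Typed-array lengths are limited to MaxSmi, so allocating one element more
+// must throw a RangeError.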
+function TestArray(constr) {
+  assertThrows(function() {
+    new constr(maxSize);
+  }, RangeError);
+}
+
+TestArray(Uint8Array);
+TestArray(Int8Array);
+TestArray(Uint16Array);
+TestArray(Int16Array);
+TestArray(Uint32Array);
+TestArray(Int32Array);
+TestArray(Float32Array);
+TestArray(Float64Array);
+TestArray(Uint8ClampedArray);
diff --git a/test/mjsunit/regress/regress-crbug-319835.js b/test/mjsunit/regress/regress-crbug-319835.js
new file mode 100644
index 0000000..48f871f
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-319835.js
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+try {} catch(e) {}  // No need to optimize the top level.
+
+var size = 0x20000;
+var a = new Float64Array(size);
+var training = new Float64Array(10);
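+// The optimizer dehoists the constant offset below into the keyed store
+// index; the fix bounds how large such dehoisted indices may be.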
+function store(a, index) {
+  var offset = 0x20000000;
+  for (var i = 0; i < 1; i++) {
+    a[index + offset] = 0xcc;
+  }
+}
+
+store(training, -0x20000000);
+store(training, -0x20000000 + 1);
+store(training, -0x20000000);
+store(training, -0x20000000 + 1);
+%OptimizeFunctionOnNextCall(store);
+
+// Segfault maybe?
+for (var i = -0x20000000; i < -0x20000000 + size; i++) {
+  store(a, i);
+}
diff --git a/test/mjsunit/regress/regress-crbug-319860.js b/test/mjsunit/regress/regress-crbug-319860.js
new file mode 100644
index 0000000..b81fb85
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-319860.js
@@ -0,0 +1,47 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
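+// Companion to crbug 319835, but for keyed loads: the constant offset gets
+// dehoisted into the typed-array index by the optimizer.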
+function read(a, index) {
+  var offset = 0x2000000;
+  var result;
+  for (var i = 0; i < 1; i++) {
+    result = a[index + offset];
+  }
+  return result;
+}
+
+var a = new Int8Array(0x2000001);
+read(a, 0);
+read(a, 0);
+%OptimizeFunctionOnNextCall(read);
+
+// Segfault maybe?
+for (var i = 0; i > -1000000; --i) {
+  read(a, i);
+}