Version 3.21.11

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@16560 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 61cfa62..d39b110 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-09-05: Version 3.21.11
+
+        Performance and stability improvements on all platforms.
+
+
 2013-09-04: Version 3.21.10
 
         Fixed Eternal::IsEmpty logic (issue 2870).
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index 65a2f9a..d7350db 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -57,6 +57,11 @@
    */
   int GetLineNumber() const;
 
+  /** Returns the bailout reason for the function
+    * if the optimization was disabled for it.
+    */
+  const char* GetBailoutReason() const;
+
   /** DEPRECATED. Please use GetHitCount instead.
     * Returns the count of samples where function was currently executing.
     */
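
A minimal sketch (not part of this patch) of how an embedder might surface the
recorded reasons after capturing a profile. The walker, its output formatting,
and the assumption that an empty string means "optimization not disabled" are
illustrative, not taken from this change.

  #include <stdio.h>
  #include <v8.h>
  #include <v8-profiler.h>

  // Walk a captured profile and report every function whose optimization was
  // disabled, together with the reason recorded by the profiler.
  static void ReportBailouts(const v8::CpuProfileNode* node, int depth) {
    const char* reason = node->GetBailoutReason();
    if (reason != NULL && *reason != '\0') {  // assumption: empty == no reason
      v8::String::Utf8Value name(node->GetFunctionName());
      printf("%*s%s: %s\n", depth * 2, "", *name, reason);
    }
    for (int i = 0; i < node->GetChildrenCount(); i++) {
      ReportBailouts(node->GetChild(i), depth + 1);
    }
  }
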
diff --git a/src/api.cc b/src/api.cc
index c95def1..ad75715 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1877,8 +1877,8 @@
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> receiver(
         isolate->context()->global_proxy(), isolate);
-    i::Handle<i::Object> result =
-        i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+    i::Handle<i::Object> result = i::Execution::Call(
+        isolate, fun, receiver, 0, NULL, &has_pending_exception);
     EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
     raw_result = *result;
   }
@@ -2177,8 +2177,8 @@
       isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
   i::Handle<i::JSFunction> fun =
       i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
-  i::Handle<i::Object> value =
-      i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
+  i::Handle<i::Object> value = i::Execution::Call(
+      isolate, fun, recv, argc, argv, has_pending_exception);
   return value;
 }
 
@@ -4143,8 +4143,8 @@
     recv_obj = obj;
   }
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> returned =
-      i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+  i::Handle<i::Object> returned = i::Execution::Call(
+      isolate, fun, recv_obj, argc, args, &has_pending_exception);
   EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
   return Utils::ToLocal(scope.CloseAndEscape(returned));
 }
@@ -4179,8 +4179,8 @@
   if (!delegate->IsUndefined()) {
     i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
     EXCEPTION_PREAMBLE(isolate);
-    i::Handle<i::Object> returned =
-        i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
+    i::Handle<i::Object> returned = i::Execution::Call(
+        isolate, fun, obj, argc, args, &has_pending_exception);
     EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
     ASSERT(!delegate->IsUndefined());
     return Utils::ToLocal(scope.CloseAndEscape(returned));
@@ -4231,8 +4231,8 @@
     STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
     i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
     EXCEPTION_PREAMBLE(isolate);
-    i::Handle<i::Object> returned =
-        i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+    i::Handle<i::Object> returned = i::Execution::Call(
+        isolate, fun, recv_obj, argc, args, &has_pending_exception);
     EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
     raw_result = *returned;
   }
@@ -7255,6 +7255,12 @@
 }
 
 
+const char* CpuProfileNode::GetBailoutReason() const {
+  const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+  return node->entry()->bailout_reason();
+}
+
+
 double CpuProfileNode::GetSelfSamplesCount() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 7605525..6ee4110 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1292,7 +1292,7 @@
     Handle<JSObject> holder,
     Handle<Name> name,
     Label* success,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<Object> callback) {
   Label miss;
 
   Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1380,6 +1380,24 @@
 
 
 void BaseLoadStubCompiler::GenerateLoadCallback(
+    const CallOptimization& call_optimization) {
+  ASSERT(call_optimization.is_simple_api_call());
+
+  // Assign stack space for the call arguments.
+  __ sub(sp, sp, Operand((kFastApiCallArguments + 1) * kPointerSize));
+
+  int argc = 0;
+  int api_call_argc = argc + kFastApiCallArguments;
+  // Write holder to stack frame.
+  __ str(receiver(), MemOperand(sp, 0));
+  // Write receiver to stack frame.
+  __ str(receiver(), MemOperand(sp, api_call_argc * kPointerSize));
+
+  GenerateFastApiDirectCall(masm(), call_optimization, argc);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
     Register reg,
     Handle<ExecutableAccessorInfo> callback) {
   // Build AccessorInfo::args_ list on the stack and push property name below
diff --git a/src/ast.cc b/src/ast.cc
index b966cc3..cbadb75 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1085,7 +1085,7 @@
 #define DONT_OPTIMIZE_NODE(NodeType) \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
     increase_node_count(); \
-    add_flag(kDontOptimize); \
+    set_dont_optimize_reason(k##NodeType); \
     add_flag(kDontInline); \
     add_flag(kDontSelfOptimize); \
   }
@@ -1097,7 +1097,7 @@
 #define DONT_CACHE_NODE(NodeType) \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
     increase_node_count(); \
-    add_flag(kDontOptimize); \
+    set_dont_optimize_reason(k##NodeType); \
     add_flag(kDontInline); \
     add_flag(kDontSelfOptimize); \
     add_flag(kDontCache); \
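
For reference, an approximate sketch of what the rewritten DONT_OPTIMIZE_NODE
macro expands to for one node type; Yield is used here purely as an example,
and kYield is one of the BailoutReason values this patch adds to objects.h.

  // Approximate expansion of DONT_OPTIMIZE_NODE(Yield) after this change:
  void AstConstructionVisitor::VisitYield(Yield* node) {
    increase_node_count();
    // Record *why* optimization is disabled instead of only setting a flag.
    set_dont_optimize_reason(kYield);
    add_flag(kDontInline);
    add_flag(kDontSelfOptimize);
  }
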
diff --git a/src/ast.h b/src/ast.h
index 0f01a3e..d0454fb 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -171,7 +171,6 @@
 
 enum AstPropertiesFlag {
   kDontInline,
-  kDontOptimize,
   kDontSelfOptimize,
   kDontSoftInline,
   kDontCache
@@ -2316,6 +2315,12 @@
     ast_properties_ = *ast_properties;
   }
 
+  bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
+  BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+  void set_dont_optimize_reason(BailoutReason reason) {
+    dont_optimize_reason_ = reason;
+  }
+
  protected:
   FunctionLiteral(Isolate* isolate,
                   Handle<String> name,
@@ -2335,6 +2340,7 @@
         scope_(scope),
         body_(body),
         inferred_name_(isolate->factory()->empty_string()),
+        dont_optimize_reason_(kNoReason),
         materialized_literal_count_(materialized_literal_count),
         expected_property_count_(expected_property_count),
         handler_count_(handler_count),
@@ -2356,6 +2362,7 @@
   ZoneList<Statement*>* body_;
   Handle<String> inferred_name_;
   AstProperties ast_properties_;
+  BailoutReason dont_optimize_reason_;
 
   int materialized_literal_count_;
   int expected_property_count_;
@@ -2830,9 +2837,10 @@
 
 class AstConstructionVisitor BASE_EMBEDDED {
  public:
-  AstConstructionVisitor() { }
+  AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
 
   AstProperties* ast_properties() { return &properties_; }
+  BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
 
  private:
   template<class> friend class AstNodeFactory;
@@ -2845,8 +2853,12 @@
 
   void increase_node_count() { properties_.add_node_count(1); }
   void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
+  void set_dont_optimize_reason(BailoutReason reason) {
+      dont_optimize_reason_ = reason;
+  }
 
   AstProperties properties_;
+  BailoutReason dont_optimize_reason_;
 };
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index b7991d3..92ff269 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1552,7 +1552,7 @@
                      : top_context->global_object(),
                      isolate);
   bool has_pending_exception;
-  Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+  Execution::Call(isolate, fun, receiver, 0, NULL, &has_pending_exception);
   if (has_pending_exception) return false;
   return true;
 }
diff --git a/src/builtins.cc b/src/builtins.cc
index b481483..b7fd83e 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -132,7 +132,6 @@
   MUST_USE_RESULT static MaybeObject* Builtin_##name(            \
       int args_length, Object** args_object, Isolate* isolate) { \
     name##ArgumentsType args(args_length, args_object);          \
-    ASSERT(isolate == Isolate::Current());                       \
     args.Verify();                                               \
     return Builtin_Impl_##name(args, isolate);                   \
   }                                                              \
@@ -448,7 +447,8 @@
     argv[i] = args.at<Object>(i + 1);
   }
   bool pending_exception;
-  Handle<Object> result = Execution::Call(function,
+  Handle<Object> result = Execution::Call(isolate,
+                                          function,
                                           args.receiver(),
                                           argc,
                                           argv.start(),
diff --git a/src/circular-queue-inl.h b/src/circular-queue-inl.h
index 8b09eeb..dfb7031 100644
--- a/src/circular-queue-inl.h
+++ b/src/circular-queue-inl.h
@@ -46,7 +46,7 @@
 
 
 template<typename T, unsigned L>
-T* SamplingCircularQueue<T, L>::StartDequeue() {
+T* SamplingCircularQueue<T, L>::Peek() {
   MemoryBarrier();
   if (Acquire_Load(&dequeue_pos_->marker) == kFull) {
     return &dequeue_pos_->record;
@@ -56,7 +56,7 @@
 
 
 template<typename T, unsigned L>
-void SamplingCircularQueue<T, L>::FinishDequeue() {
+void SamplingCircularQueue<T, L>::Remove() {
   Release_Store(&dequeue_pos_->marker, kEmpty);
   dequeue_pos_ = Next(dequeue_pos_);
 }
diff --git a/src/circular-queue.h b/src/circular-queue.h
index c7797b3..94bc89e 100644
--- a/src/circular-queue.h
+++ b/src/circular-queue.h
@@ -55,12 +55,11 @@
   void FinishEnqueue();
 
   // Executed on the consumer (analyzer) thread.
-  // StartDequeue returns a pointer to a memory location for retrieving
-  // the next record. After the record had been read by a consumer,
-  // FinishDequeue must be called. Until that moment, subsequent calls
-  // to StartDequeue will return the same pointer.
-  T* StartDequeue();
-  void FinishDequeue();
+  // Retrieves, but does not remove, the head of this queue, returning NULL
+  // if this queue is empty. After the record has been read by a consumer,
+  // Remove must be called.
+  T* Peek();
+  void Remove();
 
  private:
   // Reserved values for the entry marker.
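
A sketch of the consumer-side pattern the renamed methods describe; the Record
type, kQueueLength, and Process() below are placeholders, not code from this
patch.

  // Peek() returns NULL when the queue is empty; the slot is only released
  // back to the producer once Remove() is called.
  void DrainQueue(SamplingCircularQueue<Record, kQueueLength>* queue) {
    while (true) {
      Record* rec = queue->Peek();   // does not remove the head
      if (rec == NULL) break;        // queue is empty
      Process(rec);                  // read the record in place
      queue->Remove();               // now mark the slot as empty
    }
  }
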
diff --git a/src/compiler.cc b/src/compiler.cc
index 2bf012e..a206066 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -230,7 +230,7 @@
   return FLAG_self_optimization &&
       FLAG_crankshaft &&
       !function()->flags()->Contains(kDontSelfOptimize) &&
-      !function()->flags()->Contains(kDontOptimize) &&
+      !function()->dont_optimize() &&
       function()->scope()->AllowsLazyCompilation() &&
       (shared_info().is_null() || !shared_info()->optimization_disabled());
 }
@@ -840,7 +840,7 @@
 
   // Check the function has compiled code.
   ASSERT(shared->is_compiled());
-  shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+  shared->set_dont_optimize_reason(lit->dont_optimize_reason());
   shared->set_dont_inline(lit->flags()->Contains(kDontInline));
   shared->set_ast_node_count(lit->ast_node_count());
 
@@ -1360,7 +1360,7 @@
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
   function_info->set_ast_node_count(lit->ast_node_count());
   function_info->set_is_function(lit->is_function());
-  function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+  function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
   function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
   function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
   function_info->set_is_generator(lit->is_generator());
diff --git a/src/compiler.h b/src/compiler.h
index 98b93d3..65618dd 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -60,11 +60,10 @@
   CompilationInfo(Handle<JSFunction> closure, Zone* zone);
   virtual ~CompilationInfo();
 
-  Isolate* isolate() {
-    ASSERT(Isolate::Current() == isolate_);
+  Isolate* isolate() const {
     return isolate_;
   }
-  Zone* zone() { return zone_; }
+  Zone* zone() const { return zone_; }
   bool is_lazy() const { return IsLazy::decode(flags_); }
   bool is_eval() const { return IsEval::decode(flags_); }
   bool is_global() const { return IsGlobal::decode(flags_); }
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 35fe788..6e93b64 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -114,11 +114,11 @@
       generator_->RecordTickSample(record.sample);
     }
 
-    const TickSampleEventRecord* record = ticks_buffer_.StartDequeue();
+    const TickSampleEventRecord* record = ticks_buffer_.Peek();
     if (record == NULL) return !ticks_from_vm_buffer_.IsEmpty();
     if (record->order != last_processed_code_event_id_) return true;
     generator_->RecordTickSample(record->sample);
-    ticks_buffer_.FinishDequeue();
+    ticks_buffer_.Remove();
   }
 }
 
@@ -243,6 +243,8 @@
     ASSERT(Script::cast(shared->script()));
     Script* script = Script::cast(shared->script());
     rec->entry->set_script_id(script->id()->value());
+    rec->entry->set_bailout_reason(
+        GetBailoutReason(shared->DisableOptimizationReason()));
   }
   rec->size = code->ExecutableSize();
   rec->shared = shared->address();
@@ -273,6 +275,8 @@
   rec->entry->set_script_id(script->id()->value());
   rec->size = code->ExecutableSize();
   rec->shared = shared->address();
+  rec->entry->set_bailout_reason(
+      GetBailoutReason(shared->DisableOptimizationReason()));
   processor_->Enqueue(evt_rec);
 }
 
diff --git a/src/debug.cc b/src/debug.cc
index c3d737e..c82a2aa 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -3404,6 +3404,7 @@
 
   Handle<Object> argv[] = { exec_state, data };
   Handle<Object> result = Execution::Call(
+      isolate_,
       fun,
       Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
                      isolate_),
@@ -3421,7 +3422,6 @@
 
 bool Debugger::StartAgent(const char* name, int port,
                           bool wait_for_connection) {
-  ASSERT(Isolate::Current() == isolate_);
   if (wait_for_connection) {
     // Suspend V8 if it is already running or set V8 to suspend whenever
     // it starts.
@@ -3442,7 +3442,6 @@
 
 
 void Debugger::StopAgent() {
-  ASSERT(Isolate::Current() == isolate_);
   if (agent_ != NULL) {
     agent_->Shutdown();
     agent_->Join();
@@ -3453,7 +3452,6 @@
 
 
 void Debugger::WaitForAgent() {
-  ASSERT(Isolate::Current() == isolate_);
   if (agent_ != NULL)
     agent_->WaitUntilListening();
 }
@@ -3507,7 +3505,6 @@
 
 
 EnterDebugger::~EnterDebugger() {
-  ASSERT(Isolate::Current() == isolate_);
   Debug* debug = isolate_->debug();
 
   // Restore to the previous break state.
diff --git a/src/debug.h b/src/debug.h
index 0c24b90..85f7583 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -990,7 +990,6 @@
     isolate_->debug()->set_disable_break(disable_break);
   }
   ~DisableBreak() {
-    ASSERT(Isolate::Current() == isolate_);
     isolate_->debug()->set_disable_break(prev_disable_break_);
   }
 
diff --git a/src/execution.cc b/src/execution.cc
index 048e6e0..91d340e 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -148,7 +148,8 @@
 }
 
 
-Handle<Object> Execution::Call(Handle<Object> callable,
+Handle<Object> Execution::Call(Isolate* isolate,
+                               Handle<Object> callable,
                                Handle<Object> receiver,
                                int argc,
                                Handle<Object> argv[],
@@ -156,7 +157,6 @@
                                bool convert_receiver) {
   *pending_exception = false;
 
-  Isolate* isolate = Isolate::Current();
   if (!callable->IsJSFunction()) {
     callable = TryGetFunctionDelegate(isolate, callable, pending_exception);
     if (*pending_exception) return callable;
@@ -599,7 +599,8 @@
   do {                                                                  \
     Handle<Object> argv[] = args;                                       \
     ASSERT(has_pending_exception != NULL);                              \
-    return Call(isolate->name##_fun(),                                  \
+    return Call(isolate,                                                \
+                isolate->name##_fun(),                                  \
                 isolate->js_builtins_object(),                          \
                 ARRAY_SIZE(argv), argv,                                 \
                 has_pending_exception);                                 \
@@ -712,7 +713,8 @@
   if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
   // The function has not yet been instantiated in this context; do it.
   Handle<Object> args[] = { data };
-  Handle<Object> result = Call(isolate->instantiate_fun(),
+  Handle<Object> result = Call(isolate,
+                               isolate->instantiate_fun(),
                                isolate->js_builtins_object(),
                                ARRAY_SIZE(args),
                                args,
@@ -744,7 +746,8 @@
     return Handle<JSObject>(JSObject::cast(result));
   } else {
     Handle<Object> args[] = { data };
-    Handle<Object> result = Call(isolate->instantiate_fun(),
+    Handle<Object> result = Call(isolate,
+                                 isolate->instantiate_fun(),
                                  isolate->js_builtins_object(),
                                  ARRAY_SIZE(args),
                                  args,
@@ -760,7 +763,8 @@
                                   Handle<Object> instance_template,
                                   bool* exc) {
   Handle<Object> args[] = { instance, instance_template };
-  Execution::Call(isolate->configure_instance_fun(),
+  Execution::Call(isolate,
+                  isolate->configure_instance_fun(),
                   isolate->js_builtins_object(),
                   ARRAY_SIZE(args),
                   args,
diff --git a/src/execution.h b/src/execution.h
index 1a9a66c..0c1bf94 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -62,7 +62,8 @@
   // and the function called is not in strict mode, receiver is converted to
   // an object.
   //
-  static Handle<Object> Call(Handle<Object> callable,
+  static Handle<Object> Call(Isolate* isolate,
+                             Handle<Object> callable,
                              Handle<Object> receiver,
                              int argc,
                              Handle<Object> argv[],
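
Across the patch the pattern is the same: each caller already holds (or now
receives) an Isolate, so the implicit Isolate::Current() lookup inside
Execution::Call becomes an explicit first argument. A minimal before/after
sketch, with fun, receiver, argc and argv standing in for whatever the caller
has at hand:

  // Before: Execution::Call looked up the isolate via Isolate::Current().
  //   Handle<Object> result =
  //       Execution::Call(fun, receiver, argc, argv, &has_pending_exception);
  // After: the caller threads its isolate through explicitly.
  Handle<Object> result = Execution::Call(
      isolate, fun, receiver, argc, argv, &has_pending_exception);
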
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index f6a36f5..91a5173 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -333,7 +333,7 @@
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
   code->set_optimizable(info->IsOptimizable() &&
-                        !info->function()->flags()->Contains(kDontOptimize) &&
+                        !info->function()->dont_optimize() &&
                         info->function()->scope()->AllowsLazyCompilation());
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
diff --git a/src/handles.cc b/src/handles.cc
index 1ac77cc..b3704df 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -615,8 +615,12 @@
     if (p->IsJSProxy()) {
       Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
       Handle<Object> args[] = { proxy };
-      Handle<Object> names = Execution::Call(
-          isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
+      Handle<Object> names = Execution::Call(isolate,
+                                             isolate->proxy_enumerate(),
+                                             object,
+                                             ARRAY_SIZE(args),
+                                             args,
+                                             threw);
       if (*threw) return content;
       content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
       break;
diff --git a/src/heap.cc b/src/heap.cc
index 037a7a1..aaa3d8b 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -3681,7 +3681,7 @@
   share->set_function_token_position(0);
   // All compiler hints default to false or 0.
   share->set_compiler_hints(0);
-  share->set_opt_count(0);
+  share->set_opt_count_and_bailout_reason(0);
 
   return share;
 }
@@ -6204,7 +6204,7 @@
 
 
 bool Heap::Contains(Address addr) {
-  if (OS::IsOutsideAllocatedSpace(addr)) return false;
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
   return HasBeenSetUp() &&
     (new_space_.ToSpaceContains(addr) ||
      old_pointer_space_->Contains(addr) ||
@@ -6223,7 +6223,7 @@
 
 
 bool Heap::InSpace(Address addr, AllocationSpace space) {
-  if (OS::IsOutsideAllocatedSpace(addr)) return false;
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
   if (!HasBeenSetUp()) return false;
 
   switch (space) {
diff --git a/src/hydrogen-infer-representation.cc b/src/hydrogen-infer-representation.cc
index 1b3ab6f..f61649a 100644
--- a/src/hydrogen-infer-representation.cc
+++ b/src/hydrogen-infer-representation.cc
@@ -152,8 +152,8 @@
   // Do a fixed point iteration, trying to improve representations
   while (!worklist_.is_empty()) {
     HValue* current = worklist_.RemoveLast();
-    in_worklist_.Remove(current->id());
     current->InferRepresentation(this);
+    in_worklist_.Remove(current->id());
   }
 
   // Lastly: any instruction that we don't have representation information
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index e371277..cd8755a 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -3993,6 +3993,9 @@
 Representation HValue::RepresentationFromUseRequirements() {
   Representation rep = Representation::None();
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    // Ignore the use requirement from code that never runs.
+    if (it.value()->block()->IsDeoptimizing()) continue;
+
     // We check for observed_input_representation elsewhere.
     Representation use_rep =
         it.value()->RequiredInputRepresentation(it.index());
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 25522e8..d6b8d64 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -4911,8 +4911,8 @@
       Add<HDeoptimize>("Insufficient type feedback for property assignment",
                        Deoptimizer::SOFT);
     }
-    return BuildStoreNamed(expr, expr->id(), expr->position(),
-                           expr->AssignmentId(), prop, object, value);
+    return BuildStoreNamed(
+        expr, expr->id(), expr->AssignmentId(), prop, object, value);
   } else {
     // Keyed store.
     CHECK_ALIVE(VisitForValue(prop->key()));
@@ -4980,7 +4980,6 @@
 
 void HOptimizedGraphBuilder::BuildStoreNamed(Expression* expr,
                                              BailoutId id,
-                                             int position,
                                              BailoutId assignment_id,
                                              Property* prop,
                                              HValue* object,
@@ -5020,14 +5019,14 @@
   } else if (types != NULL && types->length() > 1) {
     Drop(2);
     return HandlePolymorphicStoreNamedField(
-        position, id, object, value, types, name);
+        expr->position(), id, object, value, types, name);
   } else {
     Drop(2);
     instr = BuildStoreNamedGeneric(object, name, value);
   }
 
   if (!ast_context()->IsEffect()) Push(value);
-  instr->set_position(position);
+  instr->set_position(expr->position());
   AddInstruction(instr);
   if (instr->HasObservableSideEffects()) {
     Add<HSimulate>(id, REMOVABLE_SIMULATE);
@@ -5122,7 +5121,7 @@
       // Named property.
       CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* object = Top();
-      PushLoad(prop, object, expr->position());
+      CHECK_ALIVE(PushLoad(prop, object, expr->position()));
 
       CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
@@ -5134,8 +5133,8 @@
         Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
       }
 
-      return BuildStoreNamed(expr, expr->id(), expr->position(),
-                             expr->AssignmentId(), prop, object, instr);
+      return BuildStoreNamed(
+          expr, expr->id(), expr->AssignmentId(), prop, object, instr);
     } else {
       // Keyed property.
       CHECK_ALIVE(VisitForValue(prop->obj()));
@@ -6358,7 +6357,7 @@
     return false;
   }
   AstProperties::Flags* flags(function->flags());
-  if (flags->Contains(kDontInline) || flags->Contains(kDontOptimize)) {
+  if (flags->Contains(kDontInline) || function->dont_optimize()) {
     TraceInline(target, caller, "target contains unsupported syntax [late]");
     return false;
   }
@@ -7475,6 +7474,19 @@
 }
 
 
+void HOptimizedGraphBuilder::BuildStoreInEffect(Expression* expr,
+                                                Property* prop,
+                                                BailoutId ast_id,
+                                                BailoutId return_id,
+                                                HValue* object,
+                                                HValue* value) {
+  EffectContext for_effect(this);
+  Push(object);
+  Push(value);
+  BuildStoreNamed(expr, ast_id, return_id, prop, object, value);
+}
+
+
 void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
@@ -7561,7 +7573,7 @@
 
       CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* object = Top();
-      PushLoad(prop, object, expr->position());
+      CHECK_ALIVE(PushLoad(prop, object, expr->position()));
 
       after = BuildIncrement(returns_original_input, expr);
 
@@ -7569,17 +7581,13 @@
         HValue* result = Pop();
         HValue* object = Pop();
         environment()->SetExpressionStackAt(0, result);
-        {
-          EffectContext for_effect(this);
-          Push(object);
-          Push(after);
-          return BuildStoreNamed(expr, expr->id(), expr->position(),
-                                 expr->AssignmentId(), prop, object, after);
-        }
+        CHECK_ALIVE(BuildStoreInEffect(
+            expr, prop, expr->id(), expr->AssignmentId(), object, after));
+        return ast_context()->ReturnValue(Pop());
       }
 
-      return BuildStoreNamed(expr, expr->id(), expr->position(),
-                             expr->AssignmentId(), prop, object, after);
+      return BuildStoreNamed(
+          expr, expr->id(), expr->AssignmentId(), prop, object, after);
     } else {
       // Keyed property.
       if (returns_original_input) Push(graph()->GetConstantUndefined());
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 0e1ed13..a12773f 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -2040,9 +2040,15 @@
                 HValue* object,
                 int position);
 
+  void BuildStoreInEffect(Expression* expression,
+                          Property* prop,
+                          BailoutId ast_id,
+                          BailoutId return_id,
+                          HValue* object,
+                          HValue* value);
+
   void BuildStoreNamed(Expression* expression,
                        BailoutId id,
-                       int position,
                        BailoutId assignment_id,
                        Property* prop,
                        HValue* object,
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 9aa7bae..e90810e 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1261,7 +1261,7 @@
     Handle<JSObject> holder,
     Handle<Name> name,
     Label* success,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<Object> callback) {
   Label miss;
 
   Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1355,6 +1355,28 @@
 
 
 void BaseLoadStubCompiler::GenerateLoadCallback(
+    const CallOptimization& call_optimization) {
+  ASSERT(call_optimization.is_simple_api_call());
+
+  // Save the return address.
+  __ mov(scratch3(), Operand(esp, 0));
+  // Assign stack space for the call arguments.
+  __ sub(esp, Immediate((kFastApiCallArguments + 1) * kPointerSize));
+  // Move the return address on top of the stack.
+  __ mov(Operand(esp, 0), scratch3());
+
+  int argc = 0;
+  int api_call_argc = argc + kFastApiCallArguments;
+  // Write holder to stack frame.
+  __ mov(Operand(esp, 1 * kPointerSize), receiver());
+  // Write receiver to stack frame.
+  __ mov(Operand(esp, (api_call_argc + 1) * kPointerSize), receiver());
+
+  GenerateFastApiCall(masm(), call_optimization, argc);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
     Register reg,
     Handle<ExecutableAccessorInfo> callback) {
   // Insert additional parameters into the stack frame above return address.
diff --git a/src/ic-inl.h b/src/ic-inl.h
index ca02183..e6ff2da 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -43,7 +43,6 @@
   Address result = Assembler::target_address_from_return_address(pc());
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  ASSERT(Isolate::Current() == isolate());
   Debug* debug = isolate()->debug();
   // First check if any break points are active if not just return the address
   // of the call.
diff --git a/src/ic.cc b/src/ic.cc
index 190fdb1..d88ab4a 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1357,8 +1357,15 @@
         if (!getter->IsJSFunction()) break;
         if (holder->IsGlobalObject()) break;
         if (!holder->HasFastProperties()) break;
+        Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+        CallOptimization call_optimization(function);
+        if (call_optimization.is_simple_api_call() &&
+            call_optimization.IsCompatibleReceiver(*receiver)) {
+          return isolate()->stub_cache()->ComputeLoadCallback(
+              name, receiver, holder, call_optimization);
+        }
         return isolate()->stub_cache()->ComputeLoadViaGetter(
-            name, receiver, holder, Handle<JSFunction>::cast(getter));
+            name, receiver, holder, function);
       } else if (receiver->IsJSArray() &&
           name->Equals(isolate()->heap()->length_string())) {
         PropertyIndex lengthIndex =
@@ -1544,13 +1551,29 @@
     case CALLBACKS: {
       Handle<Object> callback_object(lookup->GetCallbackObject(), isolate());
       // TODO(dcarney): Handle DeclaredAccessorInfo correctly.
-      if (!callback_object->IsExecutableAccessorInfo()) break;
-      Handle<ExecutableAccessorInfo> callback =
-          Handle<ExecutableAccessorInfo>::cast(callback_object);
-      if (v8::ToCData<Address>(callback->getter()) == 0) break;
-      if (!callback->IsCompatibleReceiver(*receiver)) break;
-      return isolate()->stub_cache()->ComputeKeyedLoadCallback(
-          name, receiver, holder, callback);
+      if (callback_object->IsExecutableAccessorInfo()) {
+        Handle<ExecutableAccessorInfo> callback =
+            Handle<ExecutableAccessorInfo>::cast(callback_object);
+        if (v8::ToCData<Address>(callback->getter()) == 0) break;
+        if (!callback->IsCompatibleReceiver(*receiver)) break;
+        return isolate()->stub_cache()->ComputeKeyedLoadCallback(
+            name, receiver, holder, callback);
+      } else if (callback_object->IsAccessorPair()) {
+        Handle<Object> getter(
+            Handle<AccessorPair>::cast(callback_object)->getter(),
+            isolate());
+        if (!getter->IsJSFunction()) break;
+        if (holder->IsGlobalObject()) break;
+        if (!holder->HasFastProperties()) break;
+        Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+        CallOptimization call_optimization(function);
+        if (call_optimization.is_simple_api_call() &&
+            call_optimization.IsCompatibleReceiver(*receiver)) {
+          return isolate()->stub_cache()->ComputeKeyedLoadCallback(
+              name, receiver, holder, call_optimization);
+        }
+      }
+      break;
     }
     case INTERCEPTOR:
       ASSERT(HasInterceptorGetter(lookup->holder()));
@@ -2769,7 +2792,8 @@
 
   bool caught_exception;
   Handle<Object> builtin_args[] = { right };
-  Handle<Object> result = Execution::Call(builtin_function,
+  Handle<Object> result = Execution::Call(isolate,
+                                          builtin_function,
                                           left,
                                           ARRAY_SIZE(builtin_args),
                                           builtin_args,
diff --git a/src/isolate.cc b/src/isolate.cc
index 2c15717..17762b2 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -2145,7 +2145,6 @@
 
 bool Isolate::Init(Deserializer* des) {
   ASSERT(state_ != INITIALIZED);
-  ASSERT(Isolate::Current() == this);
   TRACE_ISOLATE(init);
 
   stress_deopt_count_ = FLAG_deopt_every_n_times;
@@ -2174,8 +2173,7 @@
   memory_allocator_ = new MemoryAllocator(this);
   code_range_ = new CodeRange(this);
 
-  // Safe after setting Heap::isolate_, initializing StackGuard and
-  // ensuring that Isolate::Current() == this.
+  // Safe after setting Heap::isolate_ and initializing StackGuard.
   heap_.SetStackLimits();
 
 #define ASSIGN_ELEMENT(CamelName, hacker_name)                  \
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index c6ea288..155ae5f 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -367,7 +367,7 @@
   Handle<Object> argv[] = { key };
   bool has_exception = false;
   HandleScope scope(isolate_);
-  object = Execution::Call(fun, object, 1, argv, &has_exception);
+  object = Execution::Call(isolate_, fun, object, 1, argv, &has_exception);
   // Return empty handle to signal an exception.
   if (has_exception) return Handle<Object>::null();
   return scope.CloseAndEscape(object);
@@ -470,7 +470,7 @@
   Handle<Object> argv[] = { key, object };
   bool has_exception = false;
   Handle<Object> result =
-      Execution::Call(builtin, object, 2, argv, &has_exception);
+      Execution::Call(isolate_, builtin, object, 2, argv, &has_exception);
   if (has_exception) return EXCEPTION;
   if (result->IsUndefined()) return UNCHANGED;
   if (deferred_key) {
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 7207f8d..b93460d 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1408,7 +1408,7 @@
   INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                          HeapObject* obj)) {
 #ifdef DEBUG
-    ASSERT(Isolate::Current()->heap()->Contains(obj));
+    ASSERT(collector->heap()->Contains(obj));
     ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
 #endif
     Map* map = obj->map();
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 18353b6..9a6378b 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1287,7 +1287,7 @@
     Handle<JSObject> holder,
     Handle<Name> name,
     Label* success,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<Object> callback) {
   Label miss;
 
   Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1374,6 +1374,24 @@
 
 
 void BaseLoadStubCompiler::GenerateLoadCallback(
+    const CallOptimization& call_optimization) {
+  ASSERT(call_optimization.is_simple_api_call());
+
+  // Assign stack space for the call arguments.
+  __ Subu(sp, sp, Operand((kFastApiCallArguments + 1) * kPointerSize));
+
+  int argc = 0;
+  int api_call_argc = argc + kFastApiCallArguments;
+  // Write holder to stack frame.
+  __ sw(receiver(), MemOperand(sp, 0));
+  // Write receiver to stack frame.
+  __ sw(receiver(), MemOperand(sp, api_call_argc * kPointerSize));
+
+  GenerateFastApiDirectCall(masm(), call_optimization, argc);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
     Register reg,
     Handle<ExecutableAccessorInfo> callback) {
   // Build AccessorInfo::args_ list on the stack and push property name below
diff --git a/src/objects-inl.h b/src/objects-inl.h
index fc5ed24..cd8426f 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1185,7 +1185,6 @@
   Heap* heap =
       MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
   ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
   return heap;
 }
 
@@ -4607,7 +4606,8 @@
               kFunctionTokenPositionOffset)
 SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
               kCompilerHintsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason,
+              kOptCountAndBailoutReasonOffset)
 SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
 
 #else
@@ -4656,7 +4656,9 @@
                         compiler_hints,
                         kCompilerHintsOffset)
 
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, opt_count, kOptCountOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
+                        opt_count_and_bailout_reason,
+                        kOptCountAndBailoutReasonOffset)
 
 PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset)
 
@@ -4903,6 +4905,24 @@
 }
 
 
+int SharedFunctionInfo::opt_count() {
+  return OptCountBits::decode(opt_count_and_bailout_reason());
+}
+
+
+void SharedFunctionInfo::set_opt_count(int opt_count) {
+  set_opt_count_and_bailout_reason(
+      OptCountBits::update(opt_count_and_bailout_reason(), opt_count));
+}
+
+
+BailoutReason SharedFunctionInfo::DisableOptimizationReason() {
+  BailoutReason reason = static_cast<BailoutReason>(
+      DisabledOptimizationReasonBits::decode(opt_count_and_bailout_reason()));
+  return reason;
+}
+
+
 bool SharedFunctionInfo::has_deoptimization_support() {
   Code* code = this->code();
   return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
@@ -6177,7 +6197,6 @@
 
 
 Relocatable::Relocatable(Isolate* isolate) {
-  ASSERT(isolate == Isolate::Current());
   isolate_ = isolate;
   prev_ = isolate->relocatable_top();
   isolate->set_relocatable_top(this);
@@ -6185,7 +6204,6 @@
 
 
 Relocatable::~Relocatable() {
-  ASSERT(isolate_ == Isolate::Current());
   ASSERT_EQ(isolate_->relocatable_top(), this);
   isolate_->set_relocatable_top(prev_);
 }
diff --git a/src/objects.cc b/src/objects.cc
index 9cb077b..ca10394 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -32,6 +32,7 @@
 #include "arguments.h"
 #include "bootstrapper.h"
 #include "codegen.h"
+#include "cpu-profiler.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "date.h"
@@ -40,6 +41,7 @@
 #include "full-codegen.h"
 #include "hydrogen.h"
 #include "isolate-inl.h"
+#include "log.h"
 #include "objects-inl.h"
 #include "objects-visiting.h"
 #include "objects-visiting-inl.h"
@@ -485,8 +487,8 @@
 #endif
 
   bool has_pending_exception;
-  Handle<Object> result =
-      Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
+  Handle<Object> result = Execution::Call(
+      isolate, fun, self, 0, NULL, &has_pending_exception, true);
   // Check for pending exception and return the result.
   if (has_pending_exception) return Failure::Exception();
   return *result;
@@ -2089,7 +2091,8 @@
   }
   Handle<Object> args[] = { type, object, name, old_value };
   bool threw;
-  Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()),
+  Execution::Call(isolate,
+                  Handle<JSFunction>(isolate->observers_notify_change()),
                   isolate->factory()->undefined_value(),
                   old_value->IsTheHole() ? 3 : 4, args,
                   &threw);
@@ -2101,6 +2104,7 @@
   ASSERT(isolate->observer_delivery_pending());
   bool threw = false;
   Execution::Call(
+      isolate,
       isolate->observers_deliver_changes(),
       isolate->factory()->undefined_value(),
       0,
@@ -2877,7 +2881,8 @@
 #endif
   bool has_pending_exception;
   Handle<Object> argv[] = { value_handle };
-  Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+  Execution::Call(
+      isolate, fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
   // Check for pending exception and return the result.
   if (has_pending_exception) return Failure::Exception();
   return *value_handle;
@@ -3492,9 +3497,9 @@
   // Emulate [[GetProperty]] semantics for proxies.
   bool has_pending_exception;
   Handle<Object> argv[] = { result };
-  Handle<Object> desc =
-      Execution::Call(isolate->to_complete_property_descriptor(), result,
-                      ARRAY_SIZE(argv), argv, &has_pending_exception);
+  Handle<Object> desc = Execution::Call(
+      isolate, isolate->to_complete_property_descriptor(), result,
+      ARRAY_SIZE(argv), argv, &has_pending_exception);
   if (has_pending_exception) return Failure::Exception();
 
   // [[GetProperty]] requires to check that all properties are configurable.
@@ -3617,9 +3622,9 @@
 
   bool has_pending_exception;
   Handle<Object> argv[] = { result };
-  Handle<Object> desc =
-      Execution::Call(isolate->to_complete_property_descriptor(), result,
-                      ARRAY_SIZE(argv), argv, &has_pending_exception);
+  Handle<Object> desc = Execution::Call(
+      isolate, isolate->to_complete_property_descriptor(), result,
+      ARRAY_SIZE(argv), argv, &has_pending_exception);
   if (has_pending_exception) return NONE;
 
   // Convert result to PropertyAttributes.
@@ -3717,7 +3722,7 @@
   }
 
   bool threw;
-  return Execution::Call(trap, handler, argc, argv, &threw);
+  return Execution::Call(isolate, trap, handler, argc, argv, &threw);
 }
 
 
@@ -9849,12 +9854,16 @@
   // non-optimizable if optimization is disabled for the shared
   // function info.
   set_optimization_disabled(true);
+  set_bailout_reason(reason);
   // Code should be the lazy compilation stub or else unoptimized.  If the
   // latter, disable optimization for the code too.
   ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
   if (code()->kind() == Code::FUNCTION) {
     code()->set_optimizable(false);
   }
+  PROFILE(Isolate::Current(),
+      LogExistingFunction(Handle<SharedFunctionInfo>(this),
+                          Handle<Code>(code())));
   if (FLAG_trace_opt) {
     PrintF("[disabled optimization for ");
     ShortPrint();
@@ -11045,7 +11054,8 @@
       { object, index_object, deleted, add_count_object };
 
   bool threw;
-  Execution::Call(Handle<JSFunction>(isolate->observers_enqueue_splice()),
+  Execution::Call(isolate,
+                  Handle<JSFunction>(isolate->observers_enqueue_splice()),
                   isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
                   &threw);
   ASSERT(!threw);
@@ -11058,7 +11068,8 @@
   Handle<Object> args[] = { object };
 
   bool threw;
-  Execution::Call(Handle<JSFunction>(isolate->observers_begin_perform_splice()),
+  Execution::Call(isolate,
+                  Handle<JSFunction>(isolate->observers_begin_perform_splice()),
                   isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
                   &threw);
   ASSERT(!threw);
@@ -11071,7 +11082,8 @@
   Handle<Object> args[] = { object };
 
   bool threw;
-  Execution::Call(Handle<JSFunction>(isolate->observers_end_perform_splice()),
+  Execution::Call(isolate,
+                  Handle<JSFunction>(isolate->observers_end_perform_splice()),
                   isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
                   &threw);
   ASSERT(!threw);
diff --git a/src/objects.h b/src/objects.h
index f001feb..30b1b85 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1121,6 +1121,7 @@
     "Expected property cell in register rbx")                                 \
   V(kExpectingAlignmentForCopyBytes,                                          \
     "Expecting alignment for CopyBytes")                                      \
+  V(kExportDeclaration, "Export declaration")                                 \
   V(kExternalStringExpectedButNotFound,                                       \
     "external string expected, but not found")                                \
   V(kFailedBailedOutLastTime, "failed/bailed out last time")                  \
@@ -1140,6 +1141,7 @@
   V(kGlobalFunctionsMustHaveInitialMap,                                       \
     "Global functions must have initial map")                                 \
   V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered")      \
+  V(kImportDeclaration, "Import declaration")                                 \
   V(kImproperObjectOnPrototypeChainForStore,                                  \
     "improper object on prototype chain for store")                           \
   V(kIndexIsNegative, "Index is negative")                                    \
@@ -1196,6 +1198,12 @@
   V(kLookupVariableInCountOperation,                                          \
     "lookup variable in count operation")                                     \
   V(kMapIsNoLongerInEax, "Map is no longer in eax")                           \
+  V(kModuleDeclaration, "Module declaration")                                 \
+  V(kModuleLiteral, "Module literal")                                         \
+  V(kModulePath, "Module path")                                               \
+  V(kModuleStatement, "Module statement")                                     \
+  V(kModuleVariable, "Module variable")                                       \
+  V(kModuleUrl, "Module url")                                                 \
   V(kNoCasesLeft, "no cases left")                                            \
   V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin,                               \
     "No empty arrays here in EmitFastAsciiArrayJoin")                         \
@@ -1237,7 +1245,7 @@
   V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
   V(kRegisterWasClobbered, "register was clobbered")                          \
   V(kScopedBlock, "ScopedBlock")                                              \
-  V(kSharedFunctionInfoLiteral, "SharedFunctionInfoLiteral")                  \
+  V(kSharedFunctionInfoLiteral, "Shared function info literal")               \
   V(kSmiAdditionOverflow, "Smi addition overflow")                            \
   V(kSmiSubtractionOverflow, "Smi subtraction overflow")                      \
   V(kStackFrameTypesMustMatch, "stack frame types must match")                \
@@ -1323,7 +1331,8 @@
     "we should not have an empty lexical context")                            \
   V(kWithStatement, "WithStatement")                                          \
   V(kWrongAddressOrValuePassedToRecordWrite,                                  \
-    "Wrong address or value passed to RecordWrite")
+    "Wrong address or value passed to RecordWrite")                           \
+  V(kYield, "Yield")
 
 
 #define ERROR_MESSAGES_CONSTANTS(C, T) C,
@@ -1713,9 +1722,7 @@
   // The Heap the object was allocated in. Used also to access Isolate.
   inline Heap* GetHeap();
 
-  // Convenience method to get current isolate. This method can be
-  // accessed only when its result is the same as
-  // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
+  // Convenience method to get current isolate.
   inline Isolate* GetIsolate();
 
   // Converts an address to a HeapObject pointer.
@@ -6560,6 +6567,8 @@
   // shared function info.
   void DisableOptimization(BailoutReason reason);
 
+  inline BailoutReason DisableOptimizationReason();
+
   // Lookup the bailout ID and ASSERT that it exists in the non-optimized
   // code, returns whether it asserted (i.e., always true if assertions are
   // disabled).
@@ -6589,6 +6598,21 @@
   inline void set_counters(int value);
   inline int counters();
 
+  // Stores opt_count and bailout_reason as bit-fields.
+  inline void set_opt_count_and_bailout_reason(int value);
+  inline int opt_count_and_bailout_reason();
+
+  void set_bailout_reason(BailoutReason reason) {
+    set_opt_count_and_bailout_reason(
+        DisabledOptimizationReasonBits::update(opt_count_and_bailout_reason(),
+                                               reason));
+  }
+
+  void set_dont_optimize_reason(BailoutReason reason) {
+    set_bailout_reason(reason);
+    set_dont_optimize(reason != kNoReason);
+  }
+
   // Source size of this function.
   int SourceSize();
 
@@ -6655,8 +6679,10 @@
       kEndPositionOffset + kPointerSize;
   static const int kCompilerHintsOffset =
       kFunctionTokenPositionOffset + kPointerSize;
-  static const int kOptCountOffset = kCompilerHintsOffset + kPointerSize;
-  static const int kCountersOffset = kOptCountOffset + kPointerSize;
+  static const int kOptCountAndBailoutReasonOffset =
+      kCompilerHintsOffset + kPointerSize;
+  static const int kCountersOffset =
+      kOptCountAndBailoutReasonOffset + kPointerSize;
 
   // Total size.
   static const int kSize = kCountersOffset + kPointerSize;
@@ -6690,9 +6716,11 @@
   static const int kCompilerHintsOffset =
       kFunctionTokenPositionOffset + kIntSize;
 
-  static const int kOptCountOffset = kCompilerHintsOffset + kIntSize;
+  static const int kOptCountAndBailoutReasonOffset =
+      kCompilerHintsOffset + kIntSize;
 
-  static const int kCountersOffset = kOptCountOffset + kIntSize;
+  static const int kCountersOffset =
+      kOptCountAndBailoutReasonOffset + kIntSize;
 
   // Total size.
   static const int kSize = kCountersOffset + kIntSize;
@@ -6751,6 +6779,9 @@
   class OptReenableTriesBits: public BitField<int, 4, 18> {};
   class ICAgeBits: public BitField<int, 22, 8> {};
 
+  class OptCountBits: public BitField<int, 0, 22> {};
+  class DisabledOptimizationReasonBits: public BitField<int, 22, 8> {};
+
  private:
 #if V8_HOST_ARCH_32_BIT
   // On 32 bit platforms, compiler hints is a smi.
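
A worked sketch of how the two values now share the single
opt_count_and_bailout_reason field, following the BitField layout declared
above; the local variable and the use of kYield are illustrative only.

  // OptCountBits                   occupies bits  0..21 (22 bits).
  // DisabledOptimizationReasonBits occupies bits 22..29 (8 bits).
  int packed = 0;
  packed = SharedFunctionInfo::OptCountBits::update(packed, 5);
  packed = SharedFunctionInfo::DisabledOptimizationReasonBits::update(
      packed, kYield);
  ASSERT(SharedFunctionInfo::OptCountBits::decode(packed) == 5);
  ASSERT(SharedFunctionInfo::DisabledOptimizationReasonBits::decode(packed) ==
         kYield);
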
diff --git a/src/parser.cc b/src/parser.cc
index 257f80d..05ae11e 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -687,6 +687,8 @@
           FunctionLiteral::kNotParenthesized,
           FunctionLiteral::kNotGenerator);
       result->set_ast_properties(factory()->visitor()->ast_properties());
+      result->set_dont_optimize_reason(
+          factory()->visitor()->dont_optimize_reason());
     } else if (stack_overflow_) {
       isolate()->StackOverflow();
     }
@@ -4334,6 +4336,7 @@
       ? FunctionLiteral::kIsGenerator
       : FunctionLiteral::kNotGenerator;
   AstProperties ast_properties;
+  BailoutReason dont_optimize_reason = kNoReason;
   // Parse function body.
   { FunctionState function_state(this, scope, isolate());
     top_scope_->SetScopeName(function_name);
@@ -4593,6 +4596,7 @@
                         CHECK_OK);
     }
     ast_properties = *factory()->visitor()->ast_properties();
+    dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
   }
 
   if (is_extended_mode()) {
@@ -4614,6 +4618,7 @@
                                     generator);
   function_literal->set_function_token_position(function_token_position);
   function_literal->set_ast_properties(&ast_properties);
+  function_literal->set_dont_optimize_reason(dont_optimize_reason);
 
   if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
   return function_literal;
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 10525d9..f2154a4 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -52,9 +52,6 @@
 namespace internal {
 
 
-static Mutex* limit_mutex = NULL;
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -76,31 +73,6 @@
 }
 
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -112,7 +84,6 @@
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
 
@@ -365,8 +336,6 @@
   if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
   return true;
 }
 
@@ -406,12 +375,6 @@
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
 
 
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 5e4e828..b21ebf7 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -63,9 +63,6 @@
 namespace internal {
 
 
-static Mutex* limit_mutex = NULL;
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -84,31 +81,6 @@
 }
 
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool executable) {
@@ -121,7 +93,6 @@
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
 
@@ -345,8 +316,6 @@
                          kMmapFdOffset)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
 
@@ -380,12 +349,6 @@
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
 
 
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 3416da3..30825d7 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -76,9 +76,6 @@
 namespace internal {
 
 
-static Mutex* limit_mutex = NULL;
-
-
 #ifdef __arm__
 
 bool OS::ArmUsingHardFloat() {
@@ -140,31 +137,6 @@
 }
 
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -178,7 +150,6 @@
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
 
@@ -472,7 +443,6 @@
     return false;
   }
 
-  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
 
@@ -501,12 +471,6 @@
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
 
 
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index d6d3128..5dd38b2 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -79,34 +79,6 @@
 namespace internal {
 
 
-static Mutex* limit_mutex = NULL;
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 // Constants used for mmap.
 // kMmapFd is used to pass vm_alloc flags to tag the region with the user
 // defined tag 255 This helps identify V8-allocated regions in memory analysis
@@ -131,7 +103,6 @@
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
 
@@ -366,8 +337,6 @@
                          kMmapFdOffset)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(address, size);
   return true;
 }
 
@@ -396,12 +365,6 @@
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
 
 
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 84039d3..eb48373 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -61,9 +61,6 @@
 namespace internal {
 
 
-static Mutex* limit_mutex = NULL;
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -82,31 +79,6 @@
 }
 
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -120,7 +92,6 @@
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
 
@@ -402,8 +373,6 @@
                          kMmapFdOffset)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
 
@@ -433,12 +402,6 @@
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
 
 
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 99636d6..d8b60c3 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -81,9 +81,6 @@
 namespace internal {
 
 
-static Mutex* limit_mutex = NULL;
-
-
 const char* OS::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -99,31 +96,6 @@
 }
 
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
@@ -136,7 +108,6 @@
     return NULL;
   }
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
   return mbase;
 }
 
@@ -366,8 +337,6 @@
                          kMmapFdOffset)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, size);
   return true;
 }
 
@@ -401,12 +370,6 @@
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
-}
-
-
-void OS::TearDown() {
-  delete limit_mutex;
 }
 
 
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 87387e7..c98489f 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -144,8 +144,6 @@
 }
 
 
-static Mutex* limit_mutex = NULL;
-
 #if V8_TARGET_ARCH_IA32
 static void MemMoveWrapper(void* dest, const void* src, size_t size) {
   memmove(dest, src, size);
@@ -750,35 +748,6 @@
 #undef _TRUNCATE
 #undef STRUNCATE
 
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low and and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  LockGuard<Mutex> lock_guard(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* pointer) {
-  if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
-    return true;
-  // Ask the Windows API
-  if (IsBadWritePtr(pointer, 1))
-    return true;
-  return false;
-}
-
 
 // Get the system's page size used by VirtualAlloc() or the next power
 // of two. The reason for always returning a power of two is that the
@@ -872,7 +841,6 @@
   ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
 
   *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
   return mbase;
 }
 
@@ -1490,8 +1458,6 @@
   if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
     return false;
   }
-
-  UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
   return true;
 }
 
@@ -1623,13 +1589,6 @@
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srand(static_cast<unsigned int>(seed));
-  limit_mutex = new Mutex();
 }
 
-
-void OS::TearDown() {
-  delete limit_mutex;
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/platform.h b/src/platform.h
index 18bf6b0..6ae3fa0 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -178,9 +178,6 @@
   // called after CPU initialization.
   static void PostSetUp();
 
-  // Clean up platform-OS-related things. Called once at VM shutdown.
-  static void TearDown();
-
   // Returns the accumulated user time for thread. This routine
   // can be used for profiling. The implementation should
   // strive for high-precision timer resolution, preferable
@@ -254,13 +251,6 @@
   // Get the Alignment guaranteed by Allocate().
   static size_t AllocateAlignment();
 
-  // Returns an indication of whether a pointer is in a space that
-  // has been allocated by Allocate().  This method may conservatively
-  // always return false, but giving more accurate information may
-  // improve the robustness of the stack dump code in the presence of
-  // heap corruption.
-  static bool IsOutsideAllocatedSpace(void* pointer);
-
   // Sleep for a number of milliseconds.
   static void Sleep(const int milliseconds);
 
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 5a984e5..f2feb73 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -56,8 +56,8 @@
       line_number_(line_number),
       shared_id_(0),
       script_id_(v8::Script::kNoScriptId),
-      no_frame_ranges_(NULL) {
-}
+      no_frame_ranges_(NULL),
+      bailout_reason_(kEmptyBailoutReason) { }
 
 
 bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
@@ -75,8 +75,7 @@
       entry_(entry),
       self_ticks_(0),
       children_(CodeEntriesMatch),
-      id_(tree->next_node_id()) {
-}
+      id_(tree->next_node_id()) { }
 
 
 CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 19090a0..3c11521 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -133,6 +133,7 @@
 
 const char* const CodeEntry::kEmptyNamePrefix = "";
 const char* const CodeEntry::kEmptyResourceName = "";
+const char* const CodeEntry::kEmptyBailoutReason = "";
 
 
 CodeEntry::~CodeEntry() {
@@ -210,13 +211,14 @@
 
 
 void ProfileNode::Print(int indent) {
-  OS::Print("%5u %*c %s%s %d #%d",
+  OS::Print("%5u %*c %s%s %d #%d %s",
             self_ticks_,
             indent, ' ',
             entry_->name_prefix(),
             entry_->name(),
             entry_->script_id(),
-            id());
+            id(),
+            entry_->bailout_reason());
   if (entry_->resource_name()[0] != '\0')
     OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
   OS::Print("\n");
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 70f00de..9ce5f23 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -88,6 +88,10 @@
   INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
   INLINE(int script_id() const) { return script_id_; }
   INLINE(void set_script_id(int script_id)) { script_id_ = script_id; }
+  INLINE(void set_bailout_reason(const char* bailout_reason)) {
+    bailout_reason_ = bailout_reason;
+  }
+  INLINE(const char* bailout_reason() const) { return bailout_reason_; }
 
   INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
 
@@ -105,6 +109,7 @@
 
   static const char* const kEmptyNamePrefix;
   static const char* const kEmptyResourceName;
+  static const char* const kEmptyBailoutReason;
 
  private:
   Logger::LogEventsAndTags tag_ : 8;
@@ -116,6 +121,7 @@
   int shared_id_;
   int script_id_;
   List<OffsetRange>* no_frame_ranges_;
+  const char* bailout_reason_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeEntry);
 };
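
Taken together, the profile-generator changes give every CodeEntry a bailout reason that defaults to the empty string and is appended to the Print output. Below is a minimal sketch of how the new accessors are meant to be used; the code-creation call site that actually stores the reason is not part of this excerpt, so treat the helpers as assumptions:

    // Illustrative helpers only; not part of this patch.
    void MarkOptimizationDisabled(CodeEntry* entry, const char* reason) {
      // Stored as a raw const char*; the caller keeps the string alive.
      entry->set_bailout_reason(reason);
    }

    bool HasBailedOut(const CodeEntry* entry) {
      // Entries start out with kEmptyBailoutReason (""), so a non-empty
      // string means optimization was disabled for this function.
      return entry->bailout_reason()[0] != '\0';
    }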
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index fa79276..7d027f8 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -163,7 +163,6 @@
     int* output,
     int output_size,
     Isolate* isolate) {
-  ASSERT(isolate == Isolate::Current());
   // Ensure that the minimum stack has been allocated.
   RegExpStackScope stack_scope(isolate);
   Address stack_base = stack_scope.stack()->stack_base();
@@ -238,7 +237,6 @@
     Address byte_offset2,
     size_t byte_length,
     Isolate* isolate) {
-  ASSERT(isolate == Isolate::Current());
   unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
       isolate->regexp_macro_assembler_canonicalize();
   // This function is not allowed to cause a garbage collection.
@@ -271,7 +269,6 @@
 Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
                                               Address* stack_base,
                                               Isolate* isolate) {
-  ASSERT(isolate == Isolate::Current());
   RegExpStack* regexp_stack = isolate->regexp_stack();
   size_t size = regexp_stack->stack_capacity();
   Address old_stack_base = regexp_stack->stack_base();
diff --git a/src/regexp-stack.cc b/src/regexp-stack.cc
index fc4114a..f3af490 100644
--- a/src/regexp-stack.cc
+++ b/src/regexp-stack.cc
@@ -39,7 +39,6 @@
 
 
 RegExpStackScope::~RegExpStackScope() {
-  ASSERT(Isolate::Current() == regexp_stack_->isolate_);
   // Reset the buffer if it has grown.
   regexp_stack_->Reset();
 }
diff --git a/src/runtime.cc b/src/runtime.cc
index 5c6bf39..9e44b58 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2469,7 +2469,7 @@
   }
   Object* new_object;
   { MaybeObject* maybe_new_object =
-        isolate->heap()->AllocateFixedArrayWithHoles(elements_count);
+        isolate->heap()->AllocateFixedArray(elements_count);
     if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
   }
   FixedArray* elements = FixedArray::cast(new_object);
@@ -8667,8 +8667,8 @@
   bool threw;
   Handle<JSReceiver> hfun(fun);
   Handle<Object> hreceiver(receiver, isolate);
-  Handle<Object> result =
-      Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+  Handle<Object> result = Execution::Call(
+      isolate, hfun, hreceiver, argc, argv, &threw, true);
 
   if (threw) return Failure::Exception();
   return *result;
@@ -8702,8 +8702,8 @@
   }
 
   bool threw;
-  Handle<Object> result =
-      Execution::Call(fun, receiver, argc, argv, &threw, true);
+  Handle<Object> result = Execution::Call(
+      isolate, fun, receiver, argc, argv, &threw, true);
 
   if (threw) return Failure::Exception();
   return *result;
@@ -12582,7 +12582,7 @@
           shared, context, NOT_TENURED);
   bool pending_exception;
   Handle<Object> result = Execution::Call(
-      eval_fun, receiver, 0, NULL, &pending_exception);
+      isolate, eval_fun, receiver, 0, NULL, &pending_exception);
 
   if (pending_exception) return Failure::Exception();
 
@@ -13353,11 +13353,19 @@
   bool pending_exception;
   {
     if (without_debugger) {
-      result = Execution::Call(function, isolate->global_object(), 0, NULL,
+      result = Execution::Call(isolate,
+                               function,
+                               isolate->global_object(),
+                               0,
+                               NULL,
                                &pending_exception);
     } else {
       EnterDebugger enter_debugger(isolate);
-      result = Execution::Call(function, isolate->global_object(), 0, NULL,
+      result = Execution::Call(isolate,
+                               function,
+                               isolate->global_object(),
+                               0,
+                               NULL,
                                &pending_exception);
     }
   }
@@ -14224,7 +14232,8 @@
     // This handle is nor shared, nor used later, so it's safe.
     Handle<Object> argv[] = { key_handle };
     bool pending_exception;
-    value = Execution::Call(factory,
+    value = Execution::Call(isolate,
+                            factory,
                             receiver,
                             ARRAY_SIZE(argv),
                             argv,
@@ -14593,7 +14602,6 @@
 
 MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
                                                        Object* dictionary) {
-  ASSERT(Isolate::Current()->heap() == heap);
   ASSERT(dictionary != NULL);
   ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0);
   for (int i = 0; i < kNumFunctions; ++i) {
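
Every Execution::Call site touched in this patch now passes the Isolate explicitly as the first argument instead of deriving it from Isolate::Current(). Reconstructed from those call sites (the declaration itself lives in src/execution.h and is not shown here), the signature is roughly the following sketch:

    // Sketch inferred from the call sites in this patch; not the
    // authoritative declaration.
    static Handle<Object> Call(Isolate* isolate,
                               Handle<Object> callable,
                               Handle<Object> receiver,
                               int argc,
                               Handle<Object> argv[],
                               bool* pending_exception,
                               bool convert_receiver = false);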
diff --git a/src/spaces.cc b/src/spaces.cc
index d527c73..13f80d8 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -228,10 +228,10 @@
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
-                                               current.start,
-                                               commit_size,
-                                               *allocated)) {
+  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
+                                                            current.start,
+                                                            commit_size,
+                                                            *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -245,7 +245,7 @@
 
 
 bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return code_range_->Commit(start, length, true);
+  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
 }
 
 
@@ -278,7 +278,9 @@
       capacity_(0),
       capacity_executable_(0),
       size_(0),
-      size_executable_(0) {
+      size_executable_(0),
+      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
 }
 
 
@@ -304,6 +306,17 @@
 }
 
 
+bool MemoryAllocator::CommitMemory(Address base,
+                                   size_t size,
+                                   Executability executable) {
+  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+    return false;
+  }
+  UpdateAllocatedSpaceLimits(base, base + size);
+  return true;
+}
+
+
 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
@@ -383,7 +396,9 @@
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, commit_size, false)) {
+    if (reservation.Commit(base, commit_size, false)) {
+      UpdateAllocatedSpaceLimits(base, base + commit_size);
+    } else {
       base = NULL;
     }
   }
@@ -509,7 +524,10 @@
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
-      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+      Executability executable = IsFlagSet(IS_EXECUTABLE)
+          ? EXECUTABLE : NOT_EXECUTABLE;
+      if (!heap()->isolate()->memory_allocator()->CommitMemory(
+              start, length, executable)) {
         return false;
       }
     } else {
@@ -763,7 +781,7 @@
 bool MemoryAllocator::CommitBlock(Address start,
                                   size_t size,
                                   Executability executable) {
-  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+  if (!CommitMemory(start, size, executable)) return false;
 
   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size);
@@ -899,6 +917,9 @@
     return false;
   }
 
+  UpdateAllocatedSpaceLimits(start,
+                             start + CodePageAreaStartOffset() +
+                             commit_size - CodePageGuardStartOffset());
   return true;
 }
 
diff --git a/src/spaces.h b/src/spaces.h
index 92c135d..5b9a4e4 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1083,6 +1083,13 @@
     return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
   }
 
+  // Returns an indication of whether a pointer is in a space that has
+  // been allocated by this MemoryAllocator.
+  V8_INLINE(bool IsOutsideAllocatedSpace(const void* address)) const {
+    return address < lowest_ever_allocated_ ||
+        address >= highest_ever_allocated_;
+  }
+
 #ifdef DEBUG
   // Reports statistic info of the space.
   void ReportStatistics();
@@ -1105,6 +1112,8 @@
                                 Executability executable,
                                 VirtualMemory* controller);
 
+  bool CommitMemory(Address addr, size_t size, Executability executable);
+
   void FreeMemory(VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
 
@@ -1150,10 +1159,10 @@
     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   }
 
-  MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
-                                                     Address start,
-                                                     size_t commit_size,
-                                                     size_t reserved_size);
+  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+                                              Address start,
+                                              size_t commit_size,
+                                              size_t reserved_size);
 
  private:
   Isolate* isolate_;
@@ -1168,6 +1177,14 @@
   // Allocated executable space size in bytes.
   size_t size_executable_;
 
+  // We keep the lowest and highest addresses allocated as a quick way
+  // of determining that pointers are outside the heap. The estimate is
+  // conservative, i.e. not all addresses in 'allocated' space are allocated
+  // to our heap. The range is [lowest, highest[, inclusive on the low end
+  // and exclusive on the high end.
+  void* lowest_ever_allocated_;
+  void* highest_ever_allocated_;
+
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                          ObjectSpace space,
@@ -1190,6 +1207,11 @@
   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                PagedSpace* owner);
 
+  void UpdateAllocatedSpaceLimits(void* low, void* high) {
+    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+  }
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
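
With the spaces.h hunk above, the lowest/highest-ever-allocated range and the IsOutsideAllocatedSpace check live on the per-isolate MemoryAllocator, so the old process-wide globals and their limit_mutex are no longer needed; every commit path already funnels through the allocator. A stripped-down sketch of the tracking pattern, kept separate from the real class for illustration:

    // Illustrative only; the real bookkeeping is the pair of void* members
    // and the two inline methods added to MemoryAllocator above.
    class AllocatedRangeTracker {
     public:
      AllocatedRangeTracker()
          : lowest_(reinterpret_cast<void*>(-1)),
            highest_(reinterpret_cast<void*>(0)) {}

      // Widen the tracked range; called from the commit paths.
      void Update(void* low, void* high) {
        if (low < lowest_) lowest_ = low;
        if (high > highest_) highest_ = high;
      }

      // Conservative: addresses inside [lowest_, highest_) need not all
      // belong to the heap, but anything outside certainly does not.
      bool IsOutside(const void* address) const {
        return address < lowest_ || address >= highest_;
      }

     private:
      void* lowest_;
      void* highest_;
    };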
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 19cfd5a..b312b1e 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -45,9 +45,7 @@
 
 
 StubCache::StubCache(Isolate* isolate)
-    : isolate_(isolate) {
-  ASSERT(isolate == Isolate::Current());
-}
+    : isolate_(isolate) { }
 
 
 void StubCache::Initialize() {
@@ -315,6 +313,24 @@
 }
 
 
+Handle<Code> StubCache::ComputeLoadCallback(
+    Handle<Name> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    const CallOptimization& call_optimization) {
+  Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+  Handle<Code> stub = FindLoadHandler(
+      name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
+  if (!stub.is_null()) return stub;
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> handler =
+      compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
+  JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+  return handler;
+}
+
+
 Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name,
                                              Handle<JSObject> receiver,
                                              Handle<JSObject> holder,
@@ -465,6 +481,24 @@
 }
 
 
+Handle<Code> StubCache::ComputeKeyedLoadCallback(
+    Handle<Name> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    const CallOptimization& call_optimization) {
+  Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+  Handle<Code> stub = FindLoadHandler(
+      name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
+  if (!stub.is_null()) return stub;
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> handler =
+      compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
+  JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+  return handler;
+}
+
+
 Handle<Code> StubCache::ComputeStoreField(Handle<Name> name,
                                           Handle<JSObject> receiver,
                                           LookupResult* lookup,
@@ -1648,6 +1682,25 @@
 }
 
 
+Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<Name> name,
+    const CallOptimization& call_optimization) {
+  ASSERT(call_optimization.is_simple_api_call());
+  Label success;
+
+  Handle<JSFunction> callback = call_optimization.constant_function();
+  CallbackHandlerFrontend(
+      object, receiver(), holder, name, &success, callback);
+  __ bind(&success);
+  GenerateLoadCallback(call_optimization);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
 Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor(
     Handle<JSObject> object,
     Handle<JSObject> holder,
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 25009b9..a267100 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -48,6 +48,8 @@
 // invalidate the cache whenever a prototype map is changed.  The stub
 // validates the map chain as in the mono-morphic case.
 
+
+class CallOptimization;
 class SmallMapList;
 class StubCache;
 
@@ -136,6 +138,11 @@
                                    Handle<JSObject> holder,
                                    Handle<ExecutableAccessorInfo> callback);
 
+  Handle<Code> ComputeLoadCallback(Handle<Name> name,
+                                   Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   const CallOptimization& call_optimization);
+
   Handle<Code> ComputeLoadViaGetter(Handle<Name> name,
                                     Handle<JSObject> object,
                                     Handle<JSObject> holder,
@@ -173,6 +180,12 @@
       Handle<JSObject> holder,
       Handle<ExecutableAccessorInfo> callback);
 
+  Handle<Code> ComputeKeyedLoadCallback(
+      Handle<Name> name,
+      Handle<JSObject> object,
+      Handle<JSObject> holder,
+      const CallOptimization& call_optimization);
+
   Handle<Code> ComputeKeyedLoadConstant(Handle<Name> name,
                                         Handle<JSObject> object,
                                         Handle<JSObject> holder,
@@ -705,6 +718,11 @@
                                    Handle<Name> name,
                                    Handle<ExecutableAccessorInfo> callback);
 
+  Handle<Code> CompileLoadCallback(Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<Name> name,
+                                   const CallOptimization& call_optimization);
+
   Handle<Code> CompileLoadConstant(Handle<JSObject> object,
                                    Handle<JSObject> holder,
                                    Handle<Name> name,
@@ -730,7 +748,7 @@
                                    Handle<JSObject> holder,
                                    Handle<Name> name,
                                    Label* success,
-                                   Handle<ExecutableAccessorInfo> callback);
+                                   Handle<Object> callback);
   void NonexistentHandlerFrontend(Handle<JSObject> object,
                                   Handle<JSObject> last,
                                   Handle<Name> name,
@@ -744,6 +762,7 @@
   void GenerateLoadConstant(Handle<Object> value);
   void GenerateLoadCallback(Register reg,
                             Handle<ExecutableAccessorInfo> callback);
+  void GenerateLoadCallback(const CallOptimization& call_optimization);
   void GenerateLoadInterceptor(Register holder_reg,
                                Handle<JSObject> object,
                                Handle<JSObject> holder,
@@ -1028,8 +1047,6 @@
   V(ArrayCode)
 
 
-class CallOptimization;
-
 class CallStubCompiler: public StubCompiler {
  public:
   CallStubCompiler(Isolate* isolate,
@@ -1161,6 +1178,12 @@
   int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
                                       Handle<JSObject> holder) const;
 
+  bool IsCompatibleReceiver(Object* receiver) {
+    ASSERT(is_simple_api_call());
+    if (expected_receiver_type_.is_null()) return true;
+    return receiver->IsInstanceOf(*expected_receiver_type_);
+  }
+
  private:
   void Initialize(Handle<JSFunction> function);
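
The new ComputeLoadCallback/ComputeKeyedLoadCallback entries take a CallOptimization instead of an ExecutableAccessorInfo, which lets a load IC compile a CALLBACKS handler that invokes a simple API getter directly. The IC-side call site is not part of this diff; the following is a hypothetical sketch of how such a handler might be requested, under the assumption that CallOptimization can be constructed from the getter function:

    // Hypothetical call site; names and control flow are illustrative.
    Handle<Code> MaybeCompileApiGetterHandler(Isolate* isolate,
                                              Handle<Name> name,
                                              Handle<JSObject> receiver,
                                              Handle<JSObject> holder,
                                              Handle<JSFunction> getter) {
      CallOptimization call_optimization(getter);
      if (!call_optimization.is_simple_api_call() ||
          !call_optimization.IsCompatibleReceiver(*receiver)) {
        return Handle<Code>::null();
      }
      return isolate->stub_cache()->ComputeLoadCallback(
          name, receiver, holder, call_optimization);
    }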
 
diff --git a/src/v8.cc b/src/v8.cc
index e47ff12..9dea902 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -112,7 +112,6 @@
   call_completed_callbacks_ = NULL;
 
   Sampler::TearDown();
-  OS::TearDown();
 }
 
 
@@ -169,7 +168,6 @@
 // purposes. So, we keep a different state to prevent informations
 // leaks that could be used in an exploit.
 uint32_t V8::RandomPrivate(Isolate* isolate) {
-  ASSERT(isolate == Isolate::Current());
   return random_base(isolate->private_random_seed());
 }
 
diff --git a/src/version.cc b/src/version.cc
index c45a498..18436f3 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     21
-#define BUILD_NUMBER      10
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      11
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 55e4a9b..f6cc449 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1191,7 +1191,7 @@
     Handle<JSObject> holder,
     Handle<Name> name,
     Label* success,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<Object> callback) {
   Label miss;
 
   Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1276,6 +1276,29 @@
 
 
 void BaseLoadStubCompiler::GenerateLoadCallback(
+    const CallOptimization& call_optimization) {
+  ASSERT(call_optimization.is_simple_api_call());
+
+  // Save the return address, which is currently on top of the stack.
+  __ movq(scratch3(), Operand(rsp, 0));
+  // Allocate stack space for the call arguments.
+  __ subq(rsp, Immediate((kFastApiCallArguments + 1) * kPointerSize));
+  // Move the return address on top of the stack.
+  __ movq(Operand(rsp, 0), scratch3());
+
+  int argc = 0;
+  int api_call_argc = argc + kFastApiCallArguments;
+  StackArgumentsAccessor args(rsp, api_call_argc);
+  // Write holder to stack frame.
+  __ movq(args.GetArgumentOperand(api_call_argc), receiver());
+  // Write receiver to stack frame.
+  __ movq(args.GetArgumentOperand(api_call_argc - 6), receiver());
+
+  GenerateFastApiCall(masm(), call_optimization, argc);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
     Register reg,
     Handle<ExecutableAccessorInfo> callback) {
   // Insert additional parameters into the stack frame above return address.
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index 7fb25e8..3365493 100644
--- a/test/cctest/cctest.h
+++ b/test/cctest/cctest.h
@@ -91,6 +91,10 @@
 
   static v8::Isolate* isolate() { return default_isolate_; }
 
+  static i::Isolate* i_isolate() {
+    return reinterpret_cast<i::Isolate*>(default_isolate_);
+  }
+
   // Helper function to initialize the VM.
   static void InitializeVM(CcTestExtensionFlags extensions = NO_EXTENSIONS);
 
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
index b68b0b0..09b0644 100644
--- a/test/cctest/test-accessors.cc
+++ b/test/cctest/test-accessors.cc
@@ -163,6 +163,7 @@
 
 
 static void XGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  CHECK_EQ(x_receiver, info.Holder());
   XGetter(info, 1);
 }
 
@@ -172,6 +173,7 @@
   v8::Isolate* isolate = v8::Isolate::GetCurrent();
   CHECK_EQ(isolate, info.GetIsolate());
   CHECK_EQ(x_holder, info.This());
+  CHECK_EQ(x_holder, info.Holder());
   x_register[offset] = value->Int32Value();
 }
 
@@ -179,7 +181,6 @@
 static void XSetter(Local<String> name,
                     Local<Value> value,
                     const v8::PropertyCallbackInfo<void>& info) {
-  CHECK_EQ(x_holder, info.Holder());
   XSetter(value, info, 0);
 }
 
@@ -205,17 +206,23 @@
   v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(CompileRun(
     "obj.__proto__ = holder;"
     "var result = [];"
+    "var key_0 = 'x0';"
+    "var key_1 = 'x1';"
     "for (var i = 0; i < 10; i++) {"
     "  holder.x0 = i;"
-    "  holder.x1 = i;"
     "  result.push(obj.x0);"
+    "  holder.x1 = i;"
     "  result.push(obj.x1);"
+    "  holder[key_0] = i;"
+    "  result.push(obj[key_0]);"
+    "  holder[key_1] = i;"
+    "  result.push(obj[key_1]);"
     "}"
     "result"));
-  CHECK_EQ(20, array->Length());
-  for (int i = 0; i < 20; i++) {
+  CHECK_EQ(40, array->Length());
+  for (int i = 0; i < 40; i++) {
     v8::Handle<Value> entry = array->Get(v8::Integer::New(i));
-    CHECK_EQ(v8::Integer::New(i/2), entry);
+    CHECK_EQ(v8::Integer::New(i/4), entry);
   }
 }
 
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index e6e3bd1..670da58 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -186,10 +186,9 @@
 
 TEST(CodeRange) {
   const int code_range_size = 32*MB;
-  OS::SetUp();
-  Isolate::Current()->InitializeLoggingAndCounters();
-  CodeRange* code_range = new CodeRange(Isolate::Current());
-  code_range->SetUp(code_range_size);
+  CcTest::InitializeVM();
+  CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()));
+  code_range.SetUp(code_range_size);
   int current_allocated = 0;
   int total_allocated = 0;
   List<Block> blocks(1000);
@@ -205,9 +204,9 @@
           (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
           Pseudorandom() % 5000 + 1;
       size_t allocated = 0;
-      Address base = code_range->AllocateRawMemory(requested,
-                                                   requested,
-                                                   &allocated);
+      Address base = code_range.AllocateRawMemory(requested,
+                                                  requested,
+                                                  &allocated);
       CHECK(base != NULL);
       blocks.Add(Block(base, static_cast<int>(allocated)));
       current_allocated += static_cast<int>(allocated);
@@ -215,7 +214,7 @@
     } else {
       // Free a block.
       int index = Pseudorandom() % blocks.length();
-      code_range->FreeRawMemory(blocks[index].base, blocks[index].size);
+      code_range.FreeRawMemory(blocks[index].base, blocks[index].size);
       current_allocated -= blocks[index].size;
       if (index < blocks.length() - 1) {
         blocks[index] = blocks.RemoveLast();
@@ -225,6 +224,5 @@
     }
   }
 
-  code_range->TearDown();
-  delete code_range;
+  code_range.TearDown();
 }
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index c0afb6d..e190b71 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -141,10 +141,13 @@
 
 
 static int signature_callback_count;
+static Local<Value> signature_expected_receiver;
 static void IncrementingSignatureCallback(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
   ApiTestFuzzer::Fuzz();
   signature_callback_count++;
+  CHECK_EQ(signature_expected_receiver, args.Holder());
+  CHECK_EQ(signature_expected_receiver, args.This());
   v8::Handle<v8::Array> result = v8::Array::New(args.Length());
   for (int i = 0; i < args.Length(); i++)
     result->Set(v8::Integer::New(i), args[i]);
@@ -213,26 +216,50 @@
       v8::FunctionTemplate::New(IncrementingSignatureCallback,
                                 v8::Handle<Value>(),
                                 sig));
+  fun->PrototypeTemplate()->SetAccessorProperty(
+      v8_str("n"),
+      v8::FunctionTemplate::New(IncrementingSignatureCallback,
+                                v8::Handle<Value>(),
+                                sig));
   env->Global()->Set(v8_str("Fun"), fun->GetFunction());
+  Local<Value> fun_instance = fun->InstanceTemplate()->NewInstance();
+  env->Global()->Set(v8_str("fun_instance"), fun_instance);
   signature_callback_count = 0;
+  int expected_count = 0;
+  signature_expected_receiver = fun_instance;
   CompileRun(
-      "var o = new Fun();"
-      "o.m();");
-  CHECK_EQ(1, signature_callback_count);
+      "var o = fun_instance;"
+      "var key_n = 'n';"
+      "for (var i = 0; i < 10; i++) o.m();"
+      "for (var i = 0; i < 10; i++) o.n;"
+      "for (var i = 0; i < 10; i++) o[key_n];");
+  expected_count += 30;
+  CHECK_EQ(expected_count, signature_callback_count);
   v8::Handle<v8::FunctionTemplate> sub_fun = v8::FunctionTemplate::New();
   sub_fun->Inherit(fun);
-  env->Global()->Set(v8_str("SubFun"), sub_fun->GetFunction());
+  fun_instance = sub_fun->InstanceTemplate()->NewInstance();
+  env->Global()->Set(v8_str("fun_instance"), fun_instance);
+  signature_expected_receiver = fun_instance;
   CompileRun(
-      "var o = new SubFun();"
-      "o.m();");
-  CHECK_EQ(2, signature_callback_count);
-
+      "var o = fun_instance;"
+      "var key_n = 'n';"
+      "for (var i = 0; i < 10; i++) o.m();"
+      "for (var i = 0; i < 10; i++) o.n;"
+      "for (var i = 0; i < 10; i++) o[key_n];");
+  expected_count += 30;
+  CHECK_EQ(expected_count, signature_callback_count);
   v8::TryCatch try_catch;
   CompileRun(
       "var o = { };"
       "o.m = Fun.prototype.m;"
       "o.m();");
-  CHECK_EQ(2, signature_callback_count);
+  CHECK_EQ(expected_count, signature_callback_count);
+  CHECK(try_catch.HasCaught());
+  CompileRun(
+      "var o = { };"
+      "o.n = Fun.prototype.n;"
+      "o.n;");
+  CHECK_EQ(expected_count, signature_callback_count);
   CHECK(try_catch.HasCaught());
   try_catch.Reset();
   v8::Handle<v8::FunctionTemplate> unrel_fun = v8::FunctionTemplate::New();
@@ -242,7 +269,14 @@
       "var o = new UnrelFun();"
       "o.m = Fun.prototype.m;"
       "o.m();");
-  CHECK_EQ(2, signature_callback_count);
+  CHECK_EQ(expected_count, signature_callback_count);
+  CHECK(try_catch.HasCaught());
+  try_catch.Reset();
+  CompileRun(
+      "var o = new UnrelFun();"
+      "o.n = Fun.prototype.n;"
+      "o.n;");
+  CHECK_EQ(expected_count, signature_callback_count);
   CHECK(try_catch.HasCaught());
 }
 
diff --git a/test/cctest/test-circular-queue.cc b/test/cctest/test-circular-queue.cc
index 1d6775d..c900be1 100644
--- a/test/cctest/test-circular-queue.cc
+++ b/test/cctest/test-circular-queue.cc
@@ -41,7 +41,7 @@
 
   // Check that we are using non-reserved values.
   // Fill up the first chunk.
-  CHECK_EQ(NULL, scq.StartDequeue());
+  CHECK_EQ(NULL, scq.Peek());
   for (Record i = 1; i < 1 + kMaxRecordsInQueue; ++i) {
     Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
     CHECK_NE(NULL, rec);
@@ -53,27 +53,27 @@
   CHECK_EQ(NULL, scq.StartEnqueue());
 
   // Try to enqueue when the the queue is full. Consumption must be available.
-  CHECK_NE(NULL, scq.StartDequeue());
+  CHECK_NE(NULL, scq.Peek());
   for (int i = 0; i < 10; ++i) {
     Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
     CHECK_EQ(NULL, rec);
-    CHECK_NE(NULL, scq.StartDequeue());
+    CHECK_NE(NULL, scq.Peek());
   }
 
   // Consume all records.
   for (Record i = 1; i < 1 + kMaxRecordsInQueue; ++i) {
-    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
+    Record* rec = reinterpret_cast<Record*>(scq.Peek());
     CHECK_NE(NULL, rec);
     CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
-    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
-    scq.FinishDequeue();
-    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
+    scq.Remove();
+    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
   }
   // The queue is empty.
-  CHECK_EQ(NULL, scq.StartDequeue());
+  CHECK_EQ(NULL, scq.Peek());
 
 
-  CHECK_EQ(NULL, scq.StartDequeue());
+  CHECK_EQ(NULL, scq.Peek());
   for (Record i = 0; i < kMaxRecordsInQueue / 2; ++i) {
     Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
     CHECK_NE(NULL, rec);
@@ -82,18 +82,18 @@
   }
 
   // Consume all available kMaxRecordsInQueue / 2 records.
-  CHECK_NE(NULL, scq.StartDequeue());
+  CHECK_NE(NULL, scq.Peek());
   for (Record i = 0; i < kMaxRecordsInQueue / 2; ++i) {
-    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
+    Record* rec = reinterpret_cast<Record*>(scq.Peek());
     CHECK_NE(NULL, rec);
     CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
-    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
-    scq.FinishDequeue();
-    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
+    scq.Remove();
+    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
   }
 
   // The queue is empty.
-  CHECK_EQ(NULL, scq.StartDequeue());
+  CHECK_EQ(NULL, scq.Peek());
 }
 
 
@@ -148,41 +148,41 @@
   ProducerThread producer2(&scq, kRecordsPerChunk, 10, &semaphore);
   ProducerThread producer3(&scq, kRecordsPerChunk, 20, &semaphore);
 
-  CHECK_EQ(NULL, scq.StartDequeue());
+  CHECK_EQ(NULL, scq.Peek());
   producer1.Start();
   semaphore.Wait();
   for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
-    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
+    Record* rec = reinterpret_cast<Record*>(scq.Peek());
     CHECK_NE(NULL, rec);
     CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
-    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
-    scq.FinishDequeue();
-    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
+    scq.Remove();
+    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
   }
 
-  CHECK_EQ(NULL, scq.StartDequeue());
+  CHECK_EQ(NULL, scq.Peek());
   producer2.Start();
   semaphore.Wait();
   for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
-    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
+    Record* rec = reinterpret_cast<Record*>(scq.Peek());
     CHECK_NE(NULL, rec);
     CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
-    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
-    scq.FinishDequeue();
-    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
+    scq.Remove();
+    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
   }
 
-  CHECK_EQ(NULL, scq.StartDequeue());
+  CHECK_EQ(NULL, scq.Peek());
   producer3.Start();
   semaphore.Wait();
   for (Record i = 20; i < 20 + kRecordsPerChunk; ++i) {
-    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
+    Record* rec = reinterpret_cast<Record*>(scq.Peek());
     CHECK_NE(NULL, rec);
     CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
-    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
-    scq.FinishDequeue();
-    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
+    scq.Remove();
+    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
   }
 
-  CHECK_EQ(NULL, scq.StartDequeue());
+  CHECK_EQ(NULL, scq.Peek());
 }
diff --git a/test/cctest/test-code-stubs-arm.cc b/test/cctest/test-code-stubs-arm.cc
index cc51e83..c99433e 100644
--- a/test/cctest/test-code-stubs-arm.cc
+++ b/test/cctest/test-code-stubs-arm.cc
@@ -63,8 +63,11 @@
   __ Push(r7, r6, r5, r4);
   __ Push(lr);
 
+  // For softfp, move the input value into d0.
+  if (!masm.use_eabi_hardfloat()) {
+    __ vmov(d0, r0, r1);
+  }
   // Push the double argument.
-  __ vmov(d0, r0, r1);
   __ sub(sp, sp, Operand(kDoubleSize));
   __ vstr(d0, sp, 0);
   if (!source_reg.is(sp)) {
@@ -124,6 +127,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
+  CPU::FlushICache(buffer, actual_size);
   return (reinterpret_cast<ConvertDToIFunc>(
       reinterpret_cast<intptr_t>(buffer)));
 }
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index b5ba46c..7e87e10 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -116,7 +116,7 @@
 }
 
 
-static double Inc(int x) {
+static double Inc(Isolate* isolate, int x) {
   const char* source = "result = %d + 1;";
   EmbeddedVector<char, 512> buffer;
   OS::SNPrintF(buffer, source, x);
@@ -125,8 +125,8 @@
   if (fun.is_null()) return -1;
 
   bool has_pending_exception;
-  Handle<JSObject> global(Isolate::Current()->context()->global_object());
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Handle<JSObject> global(isolate->context()->global_object());
+  Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
   CHECK(!has_pending_exception);
   return GetGlobalProperty("result")->ToObjectChecked()->Number();
 }
@@ -135,19 +135,19 @@
 TEST(Inc) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
-  CHECK_EQ(4.0, Inc(3));
+  CHECK_EQ(4.0, Inc(CcTest::i_isolate(), 3));
 }
 
 
-static double Add(int x, int y) {
+static double Add(Isolate* isolate, int x, int y) {
   Handle<JSFunction> fun = Compile("result = x + y;");
   if (fun.is_null()) return -1;
 
   SetGlobalProperty("x", Smi::FromInt(x));
   SetGlobalProperty("y", Smi::FromInt(y));
   bool has_pending_exception;
-  Handle<JSObject> global(Isolate::Current()->context()->global_object());
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Handle<JSObject> global(isolate->context()->global_object());
+  Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
   CHECK(!has_pending_exception);
   return GetGlobalProperty("result")->ToObjectChecked()->Number();
 }
@@ -156,18 +156,18 @@
 TEST(Add) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
-  CHECK_EQ(5.0, Add(2, 3));
+  CHECK_EQ(5.0, Add(CcTest::i_isolate(), 2, 3));
 }
 
 
-static double Abs(int x) {
+static double Abs(Isolate* isolate, int x) {
   Handle<JSFunction> fun = Compile("if (x < 0) result = -x; else result = x;");
   if (fun.is_null()) return -1;
 
   SetGlobalProperty("x", Smi::FromInt(x));
   bool has_pending_exception;
-  Handle<JSObject> global(Isolate::Current()->context()->global_object());
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Handle<JSObject> global(isolate->context()->global_object());
+  Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
   CHECK(!has_pending_exception);
   return GetGlobalProperty("result")->ToObjectChecked()->Number();
 }
@@ -176,19 +176,19 @@
 TEST(Abs) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
-  CHECK_EQ(3.0, Abs(-3));
+  CHECK_EQ(3.0, Abs(CcTest::i_isolate(), -3));
 }
 
 
-static double Sum(int n) {
+static double Sum(Isolate* isolate, int n) {
   Handle<JSFunction> fun =
       Compile("s = 0; while (n > 0) { s += n; n -= 1; }; result = s;");
   if (fun.is_null()) return -1;
 
   SetGlobalProperty("n", Smi::FromInt(n));
   bool has_pending_exception;
-  Handle<JSObject> global(Isolate::Current()->context()->global_object());
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Handle<JSObject> global(isolate->context()->global_object());
+  Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
   CHECK(!has_pending_exception);
   return GetGlobalProperty("result")->ToObjectChecked()->Number();
 }
@@ -197,7 +197,7 @@
 TEST(Sum) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
-  CHECK_EQ(5050.0, Sum(100));
+  CHECK_EQ(5050.0, Sum(CcTest::i_isolate(), 100));
 }
 
 
@@ -208,8 +208,9 @@
   Handle<JSFunction> fun = Compile(source);
   if (fun.is_null()) return;
   bool has_pending_exception;
-  Handle<JSObject> global(Isolate::Current()->context()->global_object());
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Handle<JSObject> global(CcTest::i_isolate()->context()->global_object());
+  Execution::Call(
+      CcTest::i_isolate(), fun, global, 0, NULL, &has_pending_exception);
   CHECK(!has_pending_exception);
 }
 
@@ -241,8 +242,9 @@
   Handle<JSFunction> fun = Compile(source);
   CHECK(!fun.is_null());
   bool has_pending_exception;
-  Handle<JSObject> global(Isolate::Current()->context()->global_object());
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Handle<JSObject> global(CcTest::i_isolate()->context()->global_object());
+  Execution::Call(
+      CcTest::i_isolate(), fun, global, 0, NULL, &has_pending_exception);
   CHECK(!has_pending_exception);
   CHECK_EQ(511.0, GetGlobalProperty("r")->ToObjectChecked()->Number());
 }
@@ -258,7 +260,7 @@
   bool has_pending_exception;
   Isolate* isolate = fun->GetIsolate();
   Handle<JSObject> global(isolate->context()->global_object());
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
   CHECK(has_pending_exception);
   CHECK_EQ(42.0, isolate->pending_exception()->ToObjectChecked()->Number());
 }
@@ -282,8 +284,9 @@
 
   // Run the generated code to populate the global object with 'foo'.
   bool has_pending_exception;
-  Handle<JSObject> global(Isolate::Current()->context()->global_object());
-  Execution::Call(fun0, global, 0, NULL, &has_pending_exception);
+  Handle<JSObject> global(isolate->context()->global_object());
+  Execution::Call(
+      isolate, fun0, global, 0, NULL, &has_pending_exception);
   CHECK(!has_pending_exception);
 
   Object* foo_string = isolate->factory()->InternalizeOneByteString(
@@ -295,7 +298,8 @@
 
   Handle<Object> argv[] = { isolate->factory()->InternalizeOneByteString(
       STATIC_ASCII_VECTOR("hello")) };
-  Execution::Call(Handle<JSFunction>::cast(fun1),
+  Execution::Call(isolate,
+                  Handle<JSFunction>::cast(fun1),
                   global,
                   ARRAY_SIZE(argv),
                   argv,
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index 99c3970..80b276c 100644
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -1067,8 +1067,8 @@
       i::GetProperty(builtins, "FormatMessage");
   i::Handle<i::Object> arg_handles[] = { format, args_array };
   bool has_exception = false;
-  i::Handle<i::Object> result =
-      i::Execution::Call(format_fun, builtins, 2, arg_handles, &has_exception);
+  i::Handle<i::Object> result = i::Execution::Call(
+      isolate, format_fun, builtins, 2, arg_handles, &has_exception);
   CHECK(!has_exception);
   CHECK(result->IsString());
   for (int i = 0; i < args.length(); i++) {
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index 5dd92b6..c84003f 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -782,3 +782,49 @@
 
   profiler->StopProfiling("LineNumber");
 }
+
+
+
+TEST(BailoutReason) {
+  const char* extensions[] = { "v8/profiler" };
+  v8::ExtensionConfiguration config(1, extensions);
+  LocalContext env(&config);
+  v8::HandleScope hs(env->GetIsolate());
+
+  v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
+  CHECK_EQ(0, profiler->GetProfileCount());
+  v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(
+      "function TryCatch() {\n"
+      "  try {\n"
+      "    startProfiling();\n"
+      "  } catch (e) { };\n"
+      "}\n"
+      "function TryFinally() {\n"
+      "  try {\n"
+      "    TryCatch();\n"
+      "  } finally { };\n"
+      "}\n"
+      "TryFinally();\n"
+      "stopProfiling();"));
+  script->Run();
+  CHECK_EQ(1, profiler->GetProfileCount());
+  const v8::CpuProfile* profile = profiler->GetCpuProfile(0);
+  const v8::CpuProfileNode* current = profile->GetTopDownRoot();
+  reinterpret_cast<ProfileNode*>(
+      const_cast<v8::CpuProfileNode*>(current))->Print(0);
+  // The tree should look like this:
+  //  (root)
+  //   (anonymous function)
+  //     TryFinally
+  //       TryCatch
+  current = PickChild(current, i::ProfileGenerator::kAnonymousFunctionName);
+  CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+
+  current = PickChild(current, "TryFinally");
+  CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+  CHECK(!strcmp("TryFinallyStatement", current->GetBailoutReason()));
+
+  current = PickChild(current, "TryCatch");
+  CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+  CHECK(!strcmp("TryCatchStatement", current->GetBailoutReason()));
+}
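
For embedders, the reason checked by this test is reachable through the public profiler API via CpuProfileNode::GetBailoutReason(). A small usage sketch, assuming a profile has already been collected (include paths follow the in-tree layout):

    #include <stdio.h>

    #include "v8.h"
    #include "v8-profiler.h"

    // Walks a profile tree and reports functions whose optimization was
    // disabled, using the bailout reason string attached to each node.
    void ReportBailouts(const v8::CpuProfileNode* node) {
      const char* reason = node->GetBailoutReason();
      if (reason != NULL && reason[0] != '\0') {
        v8::String::Utf8Value name(node->GetFunctionName());
        printf("%s: optimization disabled (%s)\n", *name, reason);
      }
      for (int i = 0; i < node->GetChildrenCount(); i++) {
        ReportBailouts(node->GetChild(i));
      }
    }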
diff --git a/test/cctest/test-random.cc b/test/cctest/test-random.cc
index 2f7ab7d..804546a 100644
--- a/test/cctest/test-random.cc
+++ b/test/cctest/test-random.cc
@@ -53,8 +53,8 @@
   Handle<ByteArray> seeds(context->random_seed());
 
   SetSeeds(seeds, state0, state1);
-  Handle<Object> value =
-      Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Handle<Object> value = Execution::Call(
+      context->GetIsolate(), fun, global, 0, NULL, &has_pending_exception);
   CHECK(value->IsHeapNumber());
   CHECK(fun->IsOptimized());
   double crankshaft_value = HeapNumber::cast(*value)->value();
@@ -70,11 +70,12 @@
   v8::V8::Initialize();
   // Skip test if crankshaft is disabled.
   if (!Isolate::Current()->use_crankshaft()) return;
-  v8::Isolate* isolate = v8::Isolate::GetCurrent();
-  v8::HandleScope scope(isolate);
-  v8::Context::Scope context_scope(v8::Context::New(isolate));
+  v8::Isolate* v8_isolate = v8::Isolate::GetCurrent();
+  v8::HandleScope scope(v8_isolate);
+  v8::Context::Scope context_scope(v8::Context::New(v8_isolate));
 
-  Handle<Context> context(Isolate::Current()->context());
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  Handle<Context> context(isolate->context());
   Handle<JSObject> global(context->global_object());
   Handle<ByteArray> seeds(context->random_seed());
   bool has_pending_exception;
@@ -88,8 +89,8 @@
   Handle<JSFunction> fun(JSFunction::cast(fun_object->ToObjectChecked()));
 
   // Optimize function.
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
-  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
+  Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
   if (!fun->IsOptimized()) fun->MarkForLazyRecompilation();
 
   // Test with some random values.
@@ -100,7 +101,7 @@
   // Test that we bail out to runtime when seeds are uninitialized (zeros).
   SetSeeds(seeds, 0, 0);
   Handle<Object> value =
-      Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+      Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
   CHECK(value->IsHeapNumber());
   CHECK(fun->IsOptimized());
   double crankshaft_value = HeapNumber::cast(*value)->value();
diff --git a/test/mjsunit/regress/post-increment-close-context.js b/test/mjsunit/regress/post-increment-close-context.js
new file mode 100644
index 0000000..08ade10
--- /dev/null
+++ b/test/mjsunit/regress/post-increment-close-context.js
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var foo = {bar: -2};
+function crash() {
+  return !(foo.bar++);
+}
+assertFalse(crash());
+assertEquals(-1, foo.bar);
+%OptimizeFunctionOnNextCall(crash);
+assertFalse(crash());
+assertEquals(0, foo.bar);
+assertTrue(crash());
+assertEquals(1, foo.bar);
+assertFalse(crash());
+assertEquals(2, foo.bar);
diff --git a/test/mjsunit/regress/regress-regexp-construct-result.js b/test/mjsunit/regress/regress-regexp-construct-result.js
new file mode 100644
index 0000000..84bdd20
--- /dev/null
+++ b/test/mjsunit/regress/regress-regexp-construct-result.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Create a huge regexp with many alternative capture groups, most of
+// which do not capture anything, but whose capture slots in the result
+// object still have to exist, even if only filled with undefined.
+// Having a large result array helps stress the GC.
+
+var num_captures = 1000;
+var regexp_string = "(a)";
+for (var i = 0; i < num_captures - 1; i++) {
+  regexp_string += "|(b)";
+}
+var regexp = new RegExp(regexp_string);
+
+for (var i = 0; i < 10; i++) {
+  var matches = regexp.exec("a");
+  var count = 0;
+  matches.forEach(function() { count++; });
+  assertEquals(num_captures + 1, count);
+}
diff --git a/test/mjsunit/track-fields.js b/test/mjsunit/track-fields.js
index 8b0ec29..4da1ab5 100644
--- a/test/mjsunit/track-fields.js
+++ b/test/mjsunit/track-fields.js
@@ -211,7 +211,6 @@
 delete o18.to_delete;
 delete o19.to_delete;
 
-assertTrue(%HaveSameMap(o18, o19));
 assertEquals(1, o18.field2);
 assertEquals(1.6, o19.field2);
 
diff --git a/tools/plot-timer-events b/tools/plot-timer-events
index 0723150..8db067d 100755
--- a/tools/plot-timer-events
+++ b/tools/plot-timer-events
@@ -67,4 +67,13 @@
     $tools_path/codemap.js $tools_path/profile.js $tools_path/profile_view.js \
     $tools_path/logreader.js $tools_path/tickprocessor.js \
     $tools_path/profviz/composer.js $tools_path/profviz/stdio.js \
-    -- $@ $options 2>/dev/null | gnuplot > timer-events.png
+    -- $@ $options 2>/dev/null > timer-events.plot
+
+success=$?
+if [[ $success != 0 ]] ; then
+    cat timer-events.plot
+else
+    cat timer-events.plot | gnuplot > timer-events.png
+fi
+
+rm -f timer-events.plot
diff --git a/tools/profviz/composer.js b/tools/profviz/composer.js
index fe0bf00..44dd763 100644
--- a/tools/profviz/composer.js
+++ b/tools/profviz/composer.js
@@ -31,7 +31,7 @@
 }
 
 
-function PlotScriptComposer(kResX, kResY) {
+function PlotScriptComposer(kResX, kResY, error_output) {
   // Constants.
   var kV8BinarySuffixes = ["/d8", "/libv8.so"];
   var kStackFrames = 8;             // Stack frames to display in the plot.
@@ -149,7 +149,10 @@
 
   // Utility functions.
   function assert(something, message) {
-    if (!something) print(new Error(message).stack);
+    if (!something) {
+      var error = new Error(message);
+      error_output(error.stack);
+    }
   }
 
   function FindCodeKind(kind) {
@@ -208,10 +211,15 @@
   // Public methods.
   this.collectData = function(input, distortion_per_entry) {
 
+    var last_timestamp = 0;
+
     // Parse functions.
     var parseTimeStamp = function(timestamp) {
+      var int_timestamp = parseInt(timestamp);
+      assert(int_timestamp >= last_timestamp, "Inconsistent timestamps.");
+      last_timestamp = int_timestamp;
       distortion += distortion_per_entry;
-      return parseInt(timestamp) / 1000 - distortion;
+      return int_timestamp / 1000 - distortion;
     }
 
     var processTimerEventStart = function(name, start) {
@@ -260,65 +268,6 @@
       code_map.deleteCode(address);
     };
 
-    var processSharedLibrary = function(name, start, end) {
-      var code_entry = new CodeMap.CodeEntry(end - start, name);
-      code_entry.kind = -2;  // External code kind.
-      for (var i = 0; i < kV8BinarySuffixes.length; i++) {
-        var suffix = kV8BinarySuffixes[i];
-        if (name.indexOf(suffix, name.length - suffix.length) >= 0) {
-          code_entry.kind = -1;  // V8 runtime code kind.
-          break;
-        }
-      }
-      code_map.addLibrary(start, code_entry);
-    };
-
-    var processTimerEventStart = function(name, start) {
-      // Find out the thread id.
-      var new_event = TimerEvents[name];
-      if (new_event === undefined) return;
-      var thread_id = new_event.thread_id;
-
-      start = Math.max(last_time_stamp[thread_id] + kMinRangeLength, start);
-
-      // Last event on this thread is done with the start of this event.
-      var last_event = event_stack[thread_id].top();
-      if (last_event !== undefined) {
-        var new_range = new Range(last_time_stamp[thread_id], start);
-        last_event.ranges.push(new_range);
-      }
-      event_stack[thread_id].push(new_event);
-      last_time_stamp[thread_id] = start;
-    };
-
-    var processTimerEventEnd = function(name, end) {
-      // Find out about the thread_id.
-      var finished_event = TimerEvents[name];
-      var thread_id = finished_event.thread_id;
-      assert(finished_event === event_stack[thread_id].pop(),
-             "inconsistent event stack");
-
-      end = Math.max(last_time_stamp[thread_id] + kMinRangeLength, end);
-
-      var new_range = new Range(last_time_stamp[thread_id], end);
-      finished_event.ranges.push(new_range);
-      last_time_stamp[thread_id] = end;
-    };
-
-    var processCodeCreateEvent = function(type, kind, address, size, name) {
-      var code_entry = new CodeMap.CodeEntry(size, name);
-      code_entry.kind = kind;
-      code_map.addCode(address, code_entry);
-    };
-
-    var processCodeMoveEvent = function(from, to) {
-      code_map.moveCode(from, to);
-    };
-
-    var processCodeDeleteEvent = function(address) {
-      code_map.deleteCode(address);
-    };
-
     var processCodeDeoptEvent = function(time, size) {
       deopts.push(new Deopt(time, size));
     }
diff --git a/tools/profviz/profviz.html b/tools/profviz/profviz.html
index 30494f8..d7dd9cb 100644
--- a/tools/profviz/profviz.html
+++ b/tools/profviz/profviz.html
@@ -96,7 +96,7 @@
         or alternatively,<br/>
         Chrome with
         <span class="tt">
-          --no-sandbox --js-flags="--prof --noprof-lazy --log-timer-events
+          --no-sandbox --js-flags="--prof --log-timer-events"
         </span> to produce <span class="tt">v8.log</span>.
       </li>
       <li>
@@ -117,6 +117,11 @@
           command-line utility
         </a> instead.
       </li>
+    </ol>
+    If you expect multiple V8 instances to run concurrently, for example
+    with several tabs in Chrome,<br/>
+    add the V8 flag <span class="tt">--logfile=v8.%p.log</span>
+    so that each instance writes to its own log file.
     </div>
   </div>
 
diff --git a/tools/profviz/profviz.js b/tools/profviz/profviz.js
index 7af12ad..8ac0881 100644
--- a/tools/profviz/profviz.js
+++ b/tools/profviz/profviz.js
@@ -42,15 +42,6 @@
 function plotWorker() {
   var worker = null;
 
-  var delegateList = {
-    "log"         : log,
-    "error"       : logError,
-    "displayplot" : displayplot,
-    "displayprof" : displayprof,
-    "range"       : setRange,
-    "script"      : scriptLoaded
-  }
-
   function initialize() {
     ui.freeze();
     worker = new Worker("worker.js");
@@ -89,6 +80,16 @@
     if (worker) worker.terminate();
     initialize();
   }
+
+  var delegateList = {
+    "log"         : log,
+    "error"       : logError,
+    "displayplot" : displayplot,
+    "displayprof" : displayprof,
+    "range"       : setRange,
+    "script"      : scriptLoaded,
+    "reset"       : this.reset
+  }
 }
 
 
@@ -233,9 +234,6 @@
 function getSelectedFile() {
   var file = ui.file.files[0];
   if (!file) throw Error("No valid file selected.");
-  if (!file.type.toString().match(/text/)) {
-    throw Error("'" + escape(file.name) + "' is not a text file.");
-  }
   return file;
 }
 
diff --git a/tools/profviz/stdio.js b/tools/profviz/stdio.js
index e800149..db38f04 100644
--- a/tools/profviz/stdio.js
+++ b/tools/profviz/stdio.js
@@ -44,7 +44,11 @@
 
 var kResX = 1600;
 var kResY = 600;
-var psc = new PlotScriptComposer(kResX, kResY);
+function log_error(text) {
+  print(text);
+  quit(1);
+}
+var psc = new PlotScriptComposer(kResX, kResY, log_error);
 psc.collectData(readline, distortion_per_entry);
 psc.findPlotRange(range_start_override, range_end_override);
 print("set terminal pngcairo size " + kResX + "," + kResY +
diff --git a/tools/profviz/worker.js b/tools/profviz/worker.js
index 60a557f..b17ca29 100644
--- a/tools/profviz/worker.js
+++ b/tools/profviz/worker.js
@@ -72,6 +72,12 @@
 }
 
 
+function log_error(text) {
+  self.postMessage({"call": "error", "args": text});
+  self.postMessage({"call": "reset"});
+}
+
+
 function run(args) {
   var file = args["file"];
   var resx = args["resx"];
@@ -121,7 +127,7 @@
   var input_file_name = "input_temp";
   var output_file_name = "output.svg";
 
-  var psc = new PlotScriptComposer(resx, resy);
+  var psc = new PlotScriptComposer(resx, resy, log_error);
   var objects = 0;
 
   time("Collecting events (" + content_lines.length + " entries)",