Version 3.30.21 (based on 486433415b65d9e79dad68490b9c02e35a3760c0)
MIPS: Classes: Add basic support for properties (issue 3330).
Classes: Add more tests for prototype edge cases (Chromium issue 3655).
Classes: Add test for method prototype (issue 3330).
Get stack trace for uncaught exceptions/promise rejections from the simple stack when available.
Classes: Add basic support for properties (issue 3330).
Allow duplicate property names in classes (issue 3570).
Windows: use SystemTimeToTzSpecificLocalTime instead of localtime_s (Chromium issue 417640).
Performance and stability improvements on all platforms.
Cr-Commit-Position: refs/heads/candidates@{#24982}
git-svn-id: https://v8.googlecode.com/svn/trunk@24982 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/BUILD.gn b/BUILD.gn
index be1c01b..7d6bf41 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -480,6 +480,8 @@
"src/compiler/access-builder.h",
"src/compiler/ast-graph-builder.cc",
"src/compiler/ast-graph-builder.h",
+ "src/compiler/ast-loop-assignment-analyzer.cc",
+ "src/compiler/ast-loop-assignment-analyzer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
"src/compiler/change-lowering.cc",
@@ -641,7 +643,6 @@
"src/factory.h",
"src/fast-dtoa.cc",
"src/fast-dtoa.h",
- "src/feedback-slots.h",
"src/field-index.h",
"src/field-index-inl.h",
"src/fixed-dtoa.cc",
@@ -917,7 +918,6 @@
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
"src/unique.h",
- "src/uri.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
diff --git a/ChangeLog b/ChangeLog
index 97d2d18..85610a0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,24 @@
+2014-10-29: Version 3.30.21
+
+ MIPS: Classes: Add basic support for properties (issue 3330).
+
+ Classes: Add more tests for prototype edge cases (Chromium issue 3655).
+
+ Classes: Add test for method prototype (issue 3330).
+
+ Get stack trace for uncaught exceptions/promise rejections from the
+ simple stack when available.
+
+ Classes: Add basic support for properties (issue 3330).
+
+ Allow duplicate property names in classes (issue 3570).
+
+ Windows: use SystemTimeToTzSpecificLocalTime instead of localtime_s
+ (Chromium issue 417640).
+
+ Performance and stability improvements on all platforms.
+
+
2014-10-28: Version 3.30.20
Performance and stability improvements on all platforms.
diff --git a/OWNERS b/OWNERS
index f67b3ec..aa5f644 100644
--- a/OWNERS
+++ b/OWNERS
@@ -16,7 +16,6 @@
svenpanne@chromium.org
titzer@chromium.org
ulan@chromium.org
-vegorov@chromium.org
verwaest@chromium.org
vogelheim@chromium.org
yangguo@chromium.org
diff --git a/include/v8.h b/include/v8.h
index f70f457..92eb11b 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -4401,6 +4401,27 @@
/**
+ * Interface for iterating through all external resources in the heap.
+ */
+class V8_EXPORT ExternalResourceVisitor { // NOLINT
+ public:
+ virtual ~ExternalResourceVisitor() {}
+ virtual void VisitExternalString(Handle<String> string) {}
+};
+
+
+/**
+ * Interface for iterating through all the persistent handles in the heap.
+ */
+class V8_EXPORT PersistentHandleVisitor { // NOLINT
+ public:
+ virtual ~PersistentHandleVisitor() {}
+ virtual void VisitPersistentHandle(Persistent<Value>* value,
+ uint16_t class_id) {}
+};
+
+
+/**
* Isolate represents an isolated instance of the V8 engine. V8 isolates have
* completely separate states. Objects from one isolate must not be used in
* other isolates. The embedder can create multiple isolates and use them in
@@ -4759,6 +4780,42 @@
*/
void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+
+ /**
+ * Forcefully terminate the current thread of JavaScript execution
+ * in the given isolate.
+ *
+ * This method can be used by any thread even if that thread has not
+ * acquired the V8 lock with a Locker object.
+ */
+ void TerminateExecution();
+
+ /**
+ * Is V8 terminating JavaScript execution.
+ *
+ * Returns true if JavaScript execution is currently terminating
+ * because of a call to TerminateExecution. In that case there are
+ * still JavaScript frames on the stack and the termination
+ * exception is still active.
+ */
+ bool IsExecutionTerminating();
+
+ /**
+ * Resume execution capability in the given isolate, whose execution
+ * was previously forcefully terminated using TerminateExecution().
+ *
+ * When execution is forcefully terminated using TerminateExecution(),
+ * the isolate can not resume execution until all JavaScript frames
+ * have propagated the uncatchable exception which is generated. This
+ * method allows the program embedding the engine to handle the
+ * termination event and resume execution capability, even if
+ * JavaScript frames remain on the stack.
+ *
+ * This method can be used by any thread even if that thread has not
+ * acquired the V8 lock with a Locker object.
+ */
+ void CancelTerminateExecution();
+
/**
* Request V8 to interrupt long running JavaScript code and invoke
* the given |callback| passing the given |data| to it. After |callback|
@@ -4940,6 +4997,84 @@
*/
void GetCodeRange(void** start, size_t* length_in_bytes);
+ /** Set the callback to invoke in case of fatal errors. */
+ void SetFatalErrorHandler(FatalErrorCallback that);
+
+ /**
+ * Set the callback to invoke to check if code generation from
+ * strings should be allowed.
+ */
+ void SetAllowCodeGenerationFromStringsCallback(
+ AllowCodeGenerationFromStringsCallback callback);
+
+ /**
+ * Check if V8 is dead and therefore unusable. This is the case after
+ * fatal errors such as out-of-memory situations.
+ */
+ bool IsDead();
+
+ /**
+ * Adds a message listener.
+ *
+ * The same message listener can be added more than once and in that
+ * case it will be called more than once for each message.
+ *
+ * If data is specified, it will be passed to the callback when it is called.
+ * Otherwise, the exception object will be passed to the callback instead.
+ */
+ bool AddMessageListener(MessageCallback that,
+ Handle<Value> data = Handle<Value>());
+
+ /**
+ * Remove all message listeners from the specified callback function.
+ */
+ void RemoveMessageListeners(MessageCallback that);
+
+ /** Callback function for reporting failed access checks.*/
+ void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
+
+ /**
+ * Tells V8 to capture current stack trace when uncaught exception occurs
+ * and report it to the message listeners. The option is off by default.
+ */
+ void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture, int frame_limit = 10,
+ StackTrace::StackTraceOptions options = StackTrace::kOverview);
+
+ /**
+ * Enables the host application to provide a mechanism to be notified
+ * and perform custom logging when V8 Allocates Executable Memory.
+ */
+ void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space, AllocationAction action);
+
+ /**
+ * Removes callback that was installed by AddMemoryAllocationCallback.
+ */
+ void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+
+ /**
+ * Iterates through all external resources referenced from current isolate
+ * heap. GC is not invoked prior to iterating, therefore there is no
+ * guarantee that visited objects are still alive.
+ */
+ void VisitExternalResources(ExternalResourceVisitor* visitor);
+
+ /**
+ * Iterates through all the persistent handles in the current isolate's heap
+ * that have class_ids.
+ */
+ void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
+
+ /**
+ * Iterates through all the persistent handles in the current isolate's heap
+ * that have class_ids and are candidates to be marked as partially dependent
+ * handles. This will visit handles to young objects created since the last
+ * garbage collection but is free to visit an arbitrary superset of these
+ * objects.
+ */
+ void VisitHandlesForPartialDependence(PersistentHandleVisitor* visitor);
+
private:
template<class K, class V, class Traits> friend class PersistentValueMap;
@@ -5019,39 +5154,20 @@
/**
- * Interface for iterating through all external resources in the heap.
- */
-class V8_EXPORT ExternalResourceVisitor { // NOLINT
- public:
- virtual ~ExternalResourceVisitor() {}
- virtual void VisitExternalString(Handle<String> string) {}
-};
-
-
-/**
- * Interface for iterating through all the persistent handles in the heap.
- */
-class V8_EXPORT PersistentHandleVisitor { // NOLINT
- public:
- virtual ~PersistentHandleVisitor() {}
- virtual void VisitPersistentHandle(Persistent<Value>* value,
- uint16_t class_id) {}
-};
-
-
-/**
* Container class for static utility functions.
*/
class V8_EXPORT V8 {
public:
/** Set the callback to invoke in case of fatal errors. */
- static void SetFatalErrorHandler(FatalErrorCallback that);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void SetFatalErrorHandler(FatalErrorCallback that);
/**
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
- static void SetAllowCodeGenerationFromStringsCallback(
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback that);
/**
@@ -5063,10 +5179,11 @@
static void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator);
/**
- * Check if V8 is dead and therefore unusable. This is the case after
- * fatal errors such as out-of-memory situations.
- */
- static bool IsDead();
+ * Check if V8 is dead and therefore unusable. This is the case after
+ * fatal errors such as out-of-memory situations.
+ */
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static bool IsDead();
/**
* The following 4 functions are to be used when V8 is built with
@@ -5119,21 +5236,23 @@
* If data is specified, it will be passed to the callback when it is called.
* Otherwise, the exception object will be passed to the callback instead.
*/
- static bool AddMessageListener(MessageCallback that,
- Handle<Value> data = Handle<Value>());
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static bool AddMessageListener(
+ MessageCallback that, Handle<Value> data = Handle<Value>());
/**
* Remove all message listeners from the specified callback function.
*/
- static void RemoveMessageListeners(MessageCallback that);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void RemoveMessageListeners(MessageCallback that);
/**
* Tells V8 to capture current stack trace when uncaught exception occurs
* and report it to the message listeners. The option is off by default.
*/
- static void SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit = 10,
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture, int frame_limit = 10,
StackTrace::StackTraceOptions options = StackTrace::kOverview);
/**
@@ -5152,7 +5271,9 @@
static const char* GetVersion();
/** Callback function for reporting failed access checks.*/
- static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void SetFailedAccessCheckCallbackFunction(
+ FailedAccessCheckCallback);
/**
* Enables the host application to receive a notification before a
@@ -5164,6 +5285,7 @@
* register the same callback function two times with different
* GCType filters.
*/
+ // TODO(dcarney): deprecate this.
static void AddGCPrologueCallback(
GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
@@ -5171,7 +5293,8 @@
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
- static void RemoveGCPrologueCallback(GCPrologueCallback callback);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void RemoveGCPrologueCallback(GCPrologueCallback callback);
/**
* Enables the host application to receive a notification after a
@@ -5183,6 +5306,7 @@
* register the same callback function two times with different
* GCType filters.
*/
+ // TODO(dcarney): deprecate this.
static void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
@@ -5190,20 +5314,24 @@
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
- static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
/**
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
- static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void AddMemoryAllocationCallback(
+ MemoryAllocationCallback callback, ObjectSpace space,
+ AllocationAction action);
/**
* Removes callback that was installed by AddMemoryAllocationCallback.
*/
- static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void RemoveMemoryAllocationCallback(
+ MemoryAllocationCallback callback);
/**
* Initializes V8. This function needs to be called before the first Isolate
@@ -5233,7 +5361,8 @@
*
* \param isolate The isolate in which to terminate the current JS execution.
*/
- static void TerminateExecution(Isolate* isolate);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void TerminateExecution(Isolate* isolate);
/**
* Is V8 terminating JavaScript execution.
@@ -5245,7 +5374,8 @@
*
* \param isolate The isolate in which to check.
*/
- static bool IsExecutionTerminating(Isolate* isolate = NULL);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static bool IsExecutionTerminating(Isolate* isolate = NULL);
/**
* Resume execution capability in the given isolate, whose execution
@@ -5263,7 +5393,8 @@
*
* \param isolate The isolate in which to resume execution capability.
*/
- static void CancelTerminateExecution(Isolate* isolate);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void CancelTerminateExecution(Isolate* isolate);
/**
* Releases any resources used by v8 and stops any utility threads
@@ -5281,19 +5412,24 @@
* heap. GC is not invoked prior to iterating, therefore there is no
* guarantee that visited objects are still alive.
*/
- static void VisitExternalResources(ExternalResourceVisitor* visitor);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void VisitExternalResources(
+ ExternalResourceVisitor* visitor);
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids.
*/
- static void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void VisitHandlesWithClassIds(
+ PersistentHandleVisitor* visitor);
/**
* Iterates through all the persistent handles in isolate's heap that have
* class_ids.
*/
- static void VisitHandlesWithClassIds(
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void VisitHandlesWithClassIds(
Isolate* isolate, PersistentHandleVisitor* visitor);
/**
@@ -5303,7 +5439,8 @@
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
- static void VisitHandlesForPartialDependence(
+ // TODO(dcarney): deprecate this.
+ V8_INLINE static void VisitHandlesForPartialDependence(
Isolate* isolate, PersistentHandleVisitor* visitor);
/**
@@ -7076,6 +7213,119 @@
}
+void V8::SetAllowCodeGenerationFromStringsCallback(
+ AllowCodeGenerationFromStringsCallback callback) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->SetAllowCodeGenerationFromStringsCallback(callback);
+}
+
+
+bool V8::IsDead() {
+ Isolate* isolate = Isolate::GetCurrent();
+ return isolate->IsDead();
+}
+
+
+bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
+ Isolate* isolate = Isolate::GetCurrent();
+ return isolate->AddMessageListener(that, data);
+}
+
+
+void V8::RemoveMessageListeners(MessageCallback that) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->RemoveMessageListeners(that);
+}
+
+
+void V8::SetFailedAccessCheckCallbackFunction(
+ FailedAccessCheckCallback callback) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->SetFailedAccessCheckCallbackFunction(callback);
+}
+
+
+void V8::SetCaptureStackTraceForUncaughtExceptions(
+ bool capture, int frame_limit, StackTrace::StackTraceOptions options) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->SetCaptureStackTraceForUncaughtExceptions(capture, frame_limit,
+ options);
+}
+
+
+void V8::SetFatalErrorHandler(FatalErrorCallback callback) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->SetFatalErrorHandler(callback);
+}
+
+
+void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->RemoveGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
+}
+
+
+void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->RemoveGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
+}
+
+
+void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->AddMemoryAllocationCallback(callback, space, action);
+}
+
+
+void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->RemoveMemoryAllocationCallback(callback);
+}
+
+
+void V8::TerminateExecution(Isolate* isolate) { isolate->TerminateExecution(); }
+
+
+bool V8::IsExecutionTerminating(Isolate* isolate) {
+ if (isolate == NULL) {
+ isolate = Isolate::GetCurrent();
+ }
+ return isolate->IsExecutionTerminating();
+}
+
+
+void V8::CancelTerminateExecution(Isolate* isolate) {
+ isolate->CancelTerminateExecution();
+}
+
+
+void V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->VisitExternalResources(visitor);
+}
+
+
+void V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
+ Isolate* isolate = Isolate::GetCurrent();
+ isolate->VisitHandlesWithClassIds(visitor);
+}
+
+
+void V8::VisitHandlesWithClassIds(Isolate* isolate,
+ PersistentHandleVisitor* visitor) {
+ isolate->VisitHandlesWithClassIds(visitor);
+}
+
+
+void V8::VisitHandlesForPartialDependence(Isolate* isolate,
+ PersistentHandleVisitor* visitor) {
+ isolate->VisitHandlesForPartialDependence(visitor);
+}
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
diff --git a/src/api.cc b/src/api.cc
index 909335d..c04393d 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -186,12 +186,6 @@
}
-bool V8::IsDead() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->IsDead();
-}
-
-
static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
if (isolate->has_scheduled_exception()) {
return isolate->scheduled_exception() ==
@@ -347,19 +341,6 @@
}
-void V8::SetFatalErrorHandler(FatalErrorCallback that) {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->set_exception_behavior(that);
-}
-
-
-void V8::SetAllowCodeGenerationFromStringsCallback(
- AllowCodeGenerationFromStringsCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->set_allow_code_gen_callback(callback);
-}
-
-
void V8::SetFlagsFromString(const char* str, int length) {
i::FlagList::SetFlagsFromString(str, length);
}
@@ -5191,60 +5172,6 @@
heap_size_limit_(0) { }
-void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->heap()->VisitExternalResources(visitor);
-}
-
-
-class VisitorAdapter : public i::ObjectVisitor {
- public:
- explicit VisitorAdapter(PersistentHandleVisitor* visitor)
- : visitor_(visitor) {}
- virtual void VisitPointers(i::Object** start, i::Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
- Value* value = ToApi<Value>(i::Handle<i::Object>(p));
- visitor_->VisitPersistentHandle(
- reinterpret_cast<Persistent<Value>*>(&value), class_id);
- }
- private:
- PersistentHandleVisitor* visitor_;
-};
-
-
-void v8::V8::VisitHandlesWithClassIds(v8::Isolate* exported_isolate,
- PersistentHandleVisitor* visitor) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
- i::DisallowHeapAllocation no_allocation;
-
- VisitorAdapter visitor_adapter(visitor);
- isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
-}
-
-
-void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
- i::Isolate* isolate = i::Isolate::Current();
- i::DisallowHeapAllocation no_allocation;
-
- VisitorAdapter visitor_adapter(visitor);
- isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
-}
-
-
-void v8::V8::VisitHandlesForPartialDependence(
- Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
- DCHECK(isolate == i::Isolate::Current());
- i::DisallowHeapAllocation no_allocation;
-
- VisitorAdapter visitor_adapter(visitor);
- isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(
- &visitor_adapter);
-}
-
-
bool v8::V8::InitializeICU(const char* icu_data_file) {
return i::InitializeICU(icu_data_file);
}
@@ -6371,57 +6298,6 @@
}
-bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- NeanderArray listeners(isolate->factory()->message_listeners());
- NeanderObject obj(isolate, 2);
- obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
- obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
- : *Utils::OpenHandle(*data));
- listeners.add(obj.value());
- return true;
-}
-
-
-void V8::RemoveMessageListeners(MessageCallback that) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- NeanderArray listeners(isolate->factory()->message_listeners());
- for (int i = 0; i < listeners.length(); i++) {
- if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
-
- NeanderObject listener(i::JSObject::cast(listeners.get(i)));
- i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
- if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
- listeners.set(i, isolate->heap()->undefined_value());
- }
- }
-}
-
-
-void V8::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
- i::Isolate::Current()->SetCaptureStackTraceForUncaughtExceptions(
- capture,
- frame_limit,
- options);
-}
-
-
-void V8::SetFailedAccessCheckCallbackFunction(
- FailedAccessCheckCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->SetFailedAccessCheckCallback(callback);
-}
-
-
void Isolate::CollectAllGarbage(const char* gc_reason) {
reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
i::Heap::kNoGCFlags, gc_reason);
@@ -6551,13 +6427,6 @@
}
-void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->heap()->RemoveGCPrologueCallback(
- reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
-}
-
-
void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
isolate->heap()->AddGCEpilogueCallback(
@@ -6567,62 +6436,55 @@
}
-void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->heap()->RemoveGCEpilogueCallback(
- reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
-}
-
-
-void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- i::Isolate* isolate = i::Isolate::Current();
+void Isolate::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->memory_allocator()->AddMemoryAllocationCallback(
callback, space, action);
}
-void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
+void Isolate::RemoveMemoryAllocationCallback(
+ MemoryAllocationCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->memory_allocator()->RemoveMemoryAllocationCallback(
callback);
}
-void V8::TerminateExecution(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i_isolate->stack_guard()->RequestTerminateExecution();
+void Isolate::TerminateExecution() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->stack_guard()->RequestTerminateExecution();
}
-bool V8::IsExecutionTerminating(Isolate* isolate) {
- i::Isolate* i_isolate = isolate != NULL ?
- reinterpret_cast<i::Isolate*>(isolate) : i::Isolate::Current();
- return IsExecutionTerminatingCheck(i_isolate);
+bool Isolate::IsExecutionTerminating() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return IsExecutionTerminatingCheck(isolate);
}
-void V8::CancelTerminateExecution(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i_isolate->stack_guard()->ClearTerminateExecution();
- i_isolate->CancelTerminateExecution();
+void Isolate::CancelTerminateExecution() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->stack_guard()->ClearTerminateExecution();
+ isolate->CancelTerminateExecution();
}
void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
- i_isolate->set_api_interrupt_callback(callback);
- i_isolate->set_api_interrupt_callback_data(data);
- i_isolate->stack_guard()->RequestApiInterrupt();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_api_interrupt_callback(callback);
+ isolate->set_api_interrupt_callback_data(data);
+ isolate->stack_guard()->RequestApiInterrupt();
}
void Isolate::ClearInterrupt() {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
- i_isolate->stack_guard()->ClearApiInterrupt();
- i_isolate->set_api_interrupt_callback(NULL);
- i_isolate->set_api_interrupt_callback_data(NULL);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->stack_guard()->ClearApiInterrupt();
+ isolate->set_api_interrupt_callback(NULL);
+ isolate->set_api_interrupt_callback_data(NULL);
}
@@ -6856,7 +6718,7 @@
}
-bool v8::Isolate::IdleNotification(int idle_time_in_ms) {
+bool Isolate::IdleNotification(int idle_time_in_ms) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -6865,7 +6727,7 @@
}
-void v8::Isolate::LowMemoryNotification() {
+void Isolate::LowMemoryNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
{
i::HistogramTimerScope idle_notification_scope(
@@ -6875,14 +6737,14 @@
}
-int v8::Isolate::ContextDisposedNotification() {
+int Isolate::ContextDisposedNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->heap()->NotifyContextDisposed();
}
-void v8::Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
- JitCodeEventHandler event_handler) {
+void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
+ JitCodeEventHandler event_handler) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
// Ensure that logging is initialized for our isolate.
isolate->InitializeLoggingAndCounters();
@@ -6890,14 +6752,14 @@
}
-void v8::Isolate::SetStackLimit(uintptr_t stack_limit) {
+void Isolate::SetStackLimit(uintptr_t stack_limit) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
CHECK(stack_limit);
isolate->stack_guard()->SetStackLimit(stack_limit);
}
-void v8::Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
+void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (isolate->code_range()->valid()) {
*start = isolate->code_range()->start();
@@ -6909,6 +6771,115 @@
}
+void Isolate::SetFatalErrorHandler(FatalErrorCallback that) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_exception_behavior(that);
+}
+
+
+void Isolate::SetAllowCodeGenerationFromStringsCallback(
+ AllowCodeGenerationFromStringsCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_allow_code_gen_callback(callback);
+}
+
+
+bool Isolate::IsDead() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->IsDead();
+}
+
+
+bool Isolate::AddMessageListener(MessageCallback that, Handle<Value> data) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ NeanderArray listeners(isolate->factory()->message_listeners());
+ NeanderObject obj(isolate, 2);
+ obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
+ obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
+ : *Utils::OpenHandle(*data));
+ listeners.add(obj.value());
+ return true;
+}
+
+
+void Isolate::RemoveMessageListeners(MessageCallback that) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ NeanderArray listeners(isolate->factory()->message_listeners());
+ for (int i = 0; i < listeners.length(); i++) {
+ if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
+
+ NeanderObject listener(i::JSObject::cast(listeners.get(i)));
+ i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
+ if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
+ listeners.set(i, isolate->heap()->undefined_value());
+ }
+ }
+}
+
+
+void Isolate::SetFailedAccessCheckCallbackFunction(
+ FailedAccessCheckCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetFailedAccessCheckCallback(callback);
+}
+
+
+void Isolate::SetCaptureStackTraceForUncaughtExceptions(
+ bool capture, int frame_limit, StackTrace::StackTraceOptions options) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(capture, frame_limit,
+ options);
+}
+
+
+void Isolate::VisitExternalResources(ExternalResourceVisitor* visitor) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->VisitExternalResources(visitor);
+}
+
+
+class VisitorAdapter : public i::ObjectVisitor {
+ public:
+ explicit VisitorAdapter(PersistentHandleVisitor* visitor)
+ : visitor_(visitor) {}
+ virtual void VisitPointers(i::Object** start, i::Object** end) {
+ UNREACHABLE();
+ }
+ virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
+ Value* value = ToApi<Value>(i::Handle<i::Object>(p));
+ visitor_->VisitPersistentHandle(
+ reinterpret_cast<Persistent<Value>*>(&value), class_id);
+ }
+
+ private:
+ PersistentHandleVisitor* visitor_;
+};
+
+
+void Isolate::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::DisallowHeapAllocation no_allocation;
+ VisitorAdapter visitor_adapter(visitor);
+ isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
+}
+
+
+void Isolate::VisitHandlesForPartialDependence(
+ PersistentHandleVisitor* visitor) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::DisallowHeapAllocation no_allocation;
+ VisitorAdapter visitor_adapter(visitor);
+ isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(
+ &visitor_adapter);
+}
+
+
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
@@ -6989,12 +6960,7 @@
i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
i::Isolate* isolate = js_obj->GetIsolate();
ENTER_V8(isolate);
- i::Handle<i::Name> key = isolate->factory()->detailed_stack_trace_symbol();
- i::Handle<i::Object> property = i::JSObject::GetDataProperty(js_obj, key);
- if (property->IsJSArray()) {
- return Utils::StackTraceToLocal(i::Handle<i::JSArray>::cast(property));
- }
- return Local<StackTrace>();
+ return Utils::StackTraceToLocal(isolate->GetDetailedStackTrace(js_obj));
}
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 4546511..90018ea 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -109,6 +109,9 @@
if (cpu.architecture() >= 7) {
if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
+ if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
+ supported_ |= 1u << ARMv8;
+ }
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
// Use movw/movt for QUALCOMM ARMv7 cores.
if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 47d705f..76e6738 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -2505,6 +2505,74 @@
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in r0.
+ DCHECK(lit != NULL);
+ __ push(r0);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = r1;
+ __ ldr(scratch,
+ FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ ldr(scratch, MemOperand(sp, kPointerSize)); // constructor
+ } else {
+ __ ldr(scratch, MemOperand(sp, 0)); // prototype
+ }
+ __ push(scratch);
+ VisitForStackValue(key);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ VisitForStackValue(value);
+ __ mov(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ VisitForStackValue(value);
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ push(scratch);
+ __ mov(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ push(scratch);
+ VisitForStackValue(value);
+ __ mov(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
@@ -2944,13 +3012,16 @@
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // r4: copy of the first argument or undefined if it doesn't exist.
+ // r5: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
+ __ ldr(r5, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
}
+ // r4: the function (closure) of the enclosing frame (r3 below holds the receiver).
+ __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
// r3: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
@@ -2962,8 +3033,9 @@
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
+ __ Push(r5);
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc
index 0a8e1ca..a318b61 100644
--- a/src/arm64/full-codegen-arm64.cc
+++ b/src/arm64/full-codegen-arm64.cc
@@ -300,7 +300,8 @@
VisitDeclarations(scope()->declarations());
}
- { Comment cmnt(masm_, "[ Stack check");
+ {
+ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
@@ -312,7 +313,8 @@
__ Bind(&ok);
}
- { Comment cmnt(masm_, "[ Body");
+ {
+ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
VisitStatements(function()->body());
DCHECK(loop_depth() == 0);
@@ -2040,7 +2042,7 @@
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
+ // Call keyed load IC. It has arguments key and receiver in x0 and x1.
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
@@ -2175,6 +2177,74 @@
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in x0.
+ DCHECK(lit != NULL);
+ __ push(x0);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = x1;
+ __ Ldr(scratch,
+ FieldMemOperand(x0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ Peek(scratch, kPointerSize); // constructor
+ } else {
+ __ Peek(scratch, 0); // prototype
+ }
+ __ Push(scratch);
+ VisitForStackValue(key);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ VisitForStackValue(value);
+ __ Mov(scratch, Smi::FromInt(NONE));
+ __ Push(scratch);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ VisitForStackValue(value);
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ push(scratch);
+ __ Mov(scratch, Smi::FromInt(NONE));
+ __ Push(scratch);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ push(scratch);
+ VisitForStackValue(value);
+ __ Mov(scratch, Smi::FromInt(NONE));
+ __ Push(scratch);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
@@ -2609,11 +2679,12 @@
// Prepare to push a copy of the first argument or undefined if it doesn't
// exist.
if (arg_count > 0) {
- __ Peek(x10, arg_count * kXRegSize);
+ __ Peek(x9, arg_count * kXRegSize);
} else {
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(x9, Heap::kUndefinedValueRootIndex);
}
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Prepare to push the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
@@ -2624,10 +2695,10 @@
__ Mov(x13, Smi::FromInt(scope()->start_position()));
// Push.
- __ Push(x10, x11, x12, x13);
+ __ Push(x9, x10, x11, x12, x13);
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
@@ -4858,7 +4929,7 @@
// The value stays in x0, and is ultimately read by the resumed generator, as
// if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed. r1
+ // is read to throw the value when the resumed generator is already closed. x1
// will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
diff --git a/src/ast-numbering.cc b/src/ast-numbering.cc
index e878424..8dfdccb 100644
--- a/src/ast-numbering.cc
+++ b/src/ast-numbering.cc
@@ -39,7 +39,10 @@
return tmp;
}
+ void IncrementNodeCount() { properties_.add_node_count(1); }
+
int next_id_;
+ AstProperties properties_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@@ -47,102 +50,127 @@
void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
+ IncrementNodeCount();
VisitVariableProxy(node->proxy());
}
void AstNumberingVisitor::VisitExportDeclaration(ExportDeclaration* node) {
+ IncrementNodeCount();
VisitVariableProxy(node->proxy());
}
-void AstNumberingVisitor::VisitModuleUrl(ModuleUrl* node) {}
+void AstNumberingVisitor::VisitModuleUrl(ModuleUrl* node) {
+ IncrementNodeCount();
+}
-void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {}
+void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
+ IncrementNodeCount();
+}
-void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {}
+void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
+ IncrementNodeCount();
+}
-void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {}
+void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
+ IncrementNodeCount();
+}
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(DebuggerStatement::num_ids()));
}
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
}
void AstNumberingVisitor::VisitLiteral(Literal* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(Literal::num_ids()));
}
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(RegExpLiteral::num_ids()));
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(ThisFunction::num_ids()));
}
void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(SuperReference::num_ids()));
Visit(node->this_var());
}
void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
+ IncrementNodeCount();
VisitVariableProxy(node->proxy());
Visit(node->module());
}
void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
+ IncrementNodeCount();
VisitVariableProxy(node->proxy());
Visit(node->module());
}
void AstNumberingVisitor::VisitModuleVariable(ModuleVariable* node) {
+ IncrementNodeCount();
Visit(node->proxy());
}
void AstNumberingVisitor::VisitModulePath(ModulePath* node) {
+ IncrementNodeCount();
Visit(node->module());
}
void AstNumberingVisitor::VisitModuleStatement(ModuleStatement* node) {
+ IncrementNodeCount();
Visit(node->body());
}
void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
+ IncrementNodeCount();
Visit(node->expression());
}
void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
+ IncrementNodeCount();
Visit(node->expression());
}
void AstNumberingVisitor::VisitYield(Yield* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(Yield::num_ids()));
Visit(node->generator_object());
Visit(node->expression());
@@ -150,24 +178,28 @@
void AstNumberingVisitor::VisitThrow(Throw* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(Throw::num_ids()));
Visit(node->exception());
}
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(UnaryOperation::num_ids()));
Visit(node->expression());
}
void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(CountOperation::num_ids()));
Visit(node->expression());
}
void AstNumberingVisitor::VisitBlock(Block* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(Block::num_ids()));
if (node->scope() != NULL) VisitDeclarations(node->scope()->declarations());
VisitStatements(node->statements());
@@ -175,29 +207,34 @@
void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
+ IncrementNodeCount();
VisitVariableProxy(node->proxy());
VisitFunctionLiteral(node->fun());
}
void AstNumberingVisitor::VisitModuleLiteral(ModuleLiteral* node) {
+ IncrementNodeCount();
VisitBlock(node->body());
}
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
}
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
+ IncrementNodeCount();
Visit(node->expression());
Visit(node->statement());
}
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
Visit(node->body());
Visit(node->cond());
@@ -205,6 +242,7 @@
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
Visit(node->cond());
Visit(node->body());
@@ -212,18 +250,21 @@
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
+ IncrementNodeCount();
Visit(node->try_block());
Visit(node->catch_block());
}
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
+ IncrementNodeCount();
Visit(node->try_block());
Visit(node->finally_block());
}
void AstNumberingVisitor::VisitProperty(Property* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(Property::num_ids()));
Visit(node->key());
Visit(node->obj());
@@ -231,6 +272,7 @@
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(Assignment::num_ids()));
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
Visit(node->target());
@@ -239,6 +281,7 @@
void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(BinaryOperation::num_ids()));
Visit(node->left());
Visit(node->right());
@@ -246,6 +289,7 @@
void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(CompareOperation::num_ids()));
Visit(node->left());
Visit(node->right());
@@ -253,6 +297,7 @@
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
Visit(node->each());
Visit(node->enumerable());
@@ -261,6 +306,7 @@
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
Visit(node->assign_iterator());
Visit(node->next_result());
@@ -271,6 +317,7 @@
void AstNumberingVisitor::VisitConditional(Conditional* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(Conditional::num_ids()));
Visit(node->condition());
Visit(node->then_expression());
@@ -279,6 +326,7 @@
void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(IfStatement::num_ids()));
Visit(node->condition());
Visit(node->then_statement());
@@ -289,6 +337,7 @@
void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(SwitchStatement::num_ids()));
Visit(node->tag());
ZoneList<CaseClause*>* cases = node->cases();
@@ -299,6 +348,7 @@
void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(CaseClause::num_ids()));
if (!node->is_default()) Visit(node->label());
VisitStatements(node->statements());
@@ -306,6 +356,7 @@
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
if (node->init() != NULL) Visit(node->init());
if (node->cond() != NULL) Visit(node->cond());
@@ -315,6 +366,7 @@
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
@@ -325,6 +377,7 @@
void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(ObjectLiteral::num_ids()));
for (int i = 0; i < node->properties()->length(); i++) {
VisitObjectLiteralProperty(node->properties()->at(i));
@@ -340,6 +393,7 @@
void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(node->num_ids()));
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
@@ -348,6 +402,7 @@
void AstNumberingVisitor::VisitCall(Call* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(Call::num_ids()));
Visit(node->expression());
VisitArguments(node->arguments());
@@ -355,6 +410,7 @@
void AstNumberingVisitor::VisitCallNew(CallNew* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(CallNew::num_ids()));
Visit(node->expression());
VisitArguments(node->arguments());
@@ -385,6 +441,7 @@
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
+ IncrementNodeCount();
node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
// We don't recurse into the declarations or body of the function literal:
// you have to separately Renumber() each FunctionLiteral that you compile.
@@ -392,6 +449,10 @@
void AstNumberingVisitor::Renumber(FunctionLiteral* node) {
+ properties_.flags()->Add(*node->flags());
+ properties_.increase_feedback_slots(node->slot_count());
+ properties_.increase_ic_feedback_slots(node->ic_slot_count());
+
if (node->scope()->HasIllegalRedeclaration()) {
node->scope()->VisitIllegalRedeclaration(this);
return;
@@ -404,6 +465,8 @@
Visit(scope->function());
}
VisitStatements(node->body());
+
+ node->set_ast_properties(&properties_);
}
diff --git a/src/ast.cc b/src/ast.cc
index 11bb4ef..1df668d 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -998,36 +998,30 @@
#define REGULAR_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
}
#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
add_slot_node(node); \
}
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
add_slot_node(node); \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_TURBOFAN_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
set_dont_crankshaft_reason(k##NodeType); \
set_dont_turbofan_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_TURBOFAN_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
add_slot_node(node); \
set_dont_crankshaft_reason(k##NodeType); \
set_dont_turbofan_reason(k##NodeType); \
@@ -1035,18 +1029,15 @@
}
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
add_slot_node(node); \
add_flag(kDontSelfOptimize); \
}
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \
@@ -1120,7 +1111,6 @@
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
- increase_node_count();
add_slot_node(node);
if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bailout on them.
diff --git a/src/ast.h b/src/ast.h
index ae7ec1a..f5c0771 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1512,6 +1512,8 @@
void set_emit_store(bool emit_store);
bool emit_store();
+ bool is_static() const { return is_static_; }
+
protected:
template<class> friend class AstNodeFactory;
@@ -3144,7 +3146,6 @@
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
- void increase_node_count() { properties_.add_node_count(1); }
void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
void set_dont_crankshaft_reason(BailoutReason reason) {
dont_crankshaft_reason_ = reason;
diff --git a/src/base/platform/platform-win32.cc b/src/base/platform/platform-win32.cc
index 9b554b2..1c46cf6 100644
--- a/src/base/platform/platform-win32.cc
+++ b/src/base/platform/platform-win32.cc
@@ -346,41 +346,26 @@
}
+int64_t FileTimeToInt64(FILETIME ft) {
+ ULARGE_INTEGER result;
+ result.LowPart = ft.dwLowDateTime;
+ result.HighPart = ft.dwHighDateTime;
+ return static_cast<int64_t>(result.QuadPart);
+}
+
+
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
- cache->InitializeIfNeeded();
-
- Win32Time rounded_to_second(*this);
- rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
- 1000 * kTimeScaler;
- // Convert to local time using POSIX localtime function.
- // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
- // very slow. Other browsers use localtime().
-
- // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
- // POSIX seconds past 1/1/1970 0:00:00.
- double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
- if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
- return 0;
- }
- // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
- time_t posix_time = static_cast<time_t>(unchecked_posix_time);
-
- // Convert to local time, as struct with fields for day, hour, year, etc.
- tm posix_local_time_struct;
- if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
-
- if (posix_local_time_struct.tm_isdst > 0) {
- return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
- } else if (posix_local_time_struct.tm_isdst == 0) {
- return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
- } else {
- return cache->tzinfo_.Bias * -kMsPerMinute;
- }
+ FILETIME local;
+ SYSTEMTIME system_utc, system_local;
+ FileTimeToSystemTime(&time_.ft_, &system_utc);
+ SystemTimeToTzSpecificLocalTime(NULL, &system_utc, &system_local);
+ SystemTimeToFileTime(&system_local, &local);
+ return (FileTimeToInt64(local) - FileTimeToInt64(time_.ft_)) / kTimeScaler;
}
diff --git a/src/builtins.cc b/src/builtins.cc
index 27d3054..dca2037 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1310,22 +1310,22 @@
static void Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, SLOPPY, kMissOnMissingHandler);
+ KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
}
static void Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, STRICT, kMissOnMissingHandler);
+ KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, SLOPPY, kCallRuntimeOnMissingHandler);
+ KeyedStoreIC::GenerateGeneric(masm, SLOPPY);
}
static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, STRICT, kCallRuntimeOnMissingHandler);
+ KeyedStoreIC::GenerateGeneric(masm, STRICT);
}
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 4e02cdd..aab2fe5 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -221,10 +221,8 @@
MaybeHandle<SharedFunctionInfo> CompilationCacheEval::Lookup(
- Handle<String> source,
- Handle<Context> context,
- StrictMode strict_mode,
- int scope_position) {
+ Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+ StrictMode strict_mode, int scope_position) {
HandleScope scope(isolate());
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
@@ -233,14 +231,14 @@
int generation;
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupEval(source, context, strict_mode, scope_position);
+ result = table->LookupEval(source, outer_info, strict_mode, scope_position);
if (result->IsSharedFunctionInfo()) break;
}
if (result->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(result);
if (generation != 0) {
- Put(source, context, function_info, scope_position);
+ Put(source, outer_info, function_info, scope_position);
}
isolate()->counters()->compilation_cache_hits()->Increment();
return scope.CloseAndEscape(function_info);
@@ -252,12 +250,12 @@
void CompilationCacheEval::Put(Handle<String> source,
- Handle<Context> context,
+ Handle<SharedFunctionInfo> outer_info,
Handle<SharedFunctionInfo> function_info,
int scope_position) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
- table = CompilationCacheTable::PutEval(table, source, context,
+ table = CompilationCacheTable::PutEval(table, source, outer_info,
function_info, scope_position);
SetFirstTable(table);
}
@@ -324,20 +322,18 @@
MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval(
- Handle<String> source,
- Handle<Context> context,
- StrictMode strict_mode,
- int scope_position) {
+ Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, StrictMode strict_mode, int scope_position) {
if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
MaybeHandle<SharedFunctionInfo> result;
if (context->IsNativeContext()) {
- result = eval_global_.Lookup(
- source, context, strict_mode, scope_position);
+ result =
+ eval_global_.Lookup(source, outer_info, strict_mode, scope_position);
} else {
DCHECK(scope_position != RelocInfo::kNoPosition);
- result = eval_contextual_.Lookup(
- source, context, strict_mode, scope_position);
+ result = eval_contextual_.Lookup(source, outer_info, strict_mode,
+ scope_position);
}
return result;
}
@@ -361,6 +357,7 @@
void CompilationCache::PutEval(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info,
int scope_position) {
@@ -368,10 +365,10 @@
HandleScope scope(isolate());
if (context->IsNativeContext()) {
- eval_global_.Put(source, context, function_info, scope_position);
+ eval_global_.Put(source, outer_info, function_info, scope_position);
} else {
DCHECK(scope_position != RelocInfo::kNoPosition);
- eval_contextual_.Put(source, context, function_info, scope_position);
+ eval_contextual_.Put(source, outer_info, function_info, scope_position);
}
}
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index fe623dc..6799b1c 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -114,14 +114,12 @@
: CompilationSubCache(isolate, generations) { }
MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Context> context,
+ Handle<SharedFunctionInfo> outer_info,
StrictMode strict_mode,
int scope_position);
- void Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position);
+ void Put(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+ Handle<SharedFunctionInfo> function_info, int scope_position);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
@@ -161,8 +159,8 @@
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
MaybeHandle<SharedFunctionInfo> LookupEval(
- Handle<String> source, Handle<Context> context, StrictMode strict_mode,
- int scope_position);
+ Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, StrictMode strict_mode, int scope_position);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
@@ -177,10 +175,9 @@
// Associate the (source, context->closure()->shared(), kind) triple
// with the shared function info. This may overwrite an existing mapping.
- void PutEval(Handle<String> source,
+ void PutEval(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info,
- int scope_position);
+ Handle<SharedFunctionInfo> function_info, int scope_position);
// Associate the (source, flags) pair to the given regexp data.
// This may overwrite an existing mapping.
diff --git a/src/compiler.cc b/src/compiler.cc
index 92331c6..8f50d9a 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -647,12 +647,7 @@
static bool CompileUnoptimizedCode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- DCHECK(info->function() != NULL);
- if (!Rewriter::Rewrite(info)) return false;
- if (!Scope::Analyze(info)) return false;
- DCHECK(info->scope() != NULL);
-
- if (!FullCodeGenerator::MakeCode(info)) {
+ if (!Compiler::Analyze(info) || !FullCodeGenerator::MakeCode(info)) {
Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
@@ -673,7 +668,6 @@
shared->set_strict_mode(lit->strict_mode());
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
shared->set_bailout_reason(lit->dont_optimize_reason());
- shared->set_ast_node_count(lit->ast_node_count());
// Compile unoptimized code.
if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
@@ -743,18 +737,33 @@
}
-static bool CompileOptimizedPrologue(CompilationInfo* info) {
- if (!Parser::Parse(info)) return false;
+static bool Renumber(CompilationInfo* info) {
+ if (!AstNumbering::Renumber(info->function(), info->zone())) return false;
+ if (!info->shared_info().is_null()) {
+ info->shared_info()->set_ast_node_count(info->function()->ast_node_count());
+ }
+ return true;
+}
+
+
+bool Compiler::Analyze(CompilationInfo* info) {
+ DCHECK(info->function() != NULL);
if (!Rewriter::Rewrite(info)) return false;
if (!Scope::Analyze(info)) return false;
- if (!AstNumbering::Renumber(info->function(), info->zone())) return false;
+ if (!Renumber(info)) return false;
DCHECK(info->scope() != NULL);
return true;
}
+bool Compiler::ParseAndAnalyze(CompilationInfo* info) {
+ if (!Parser::Parse(info)) return false;
+ return Compiler::Analyze(info);
+}
+
+
static bool GetOptimizedCodeNow(CompilationInfo* info) {
- if (!CompileOptimizedPrologue(info)) return false;
+ if (!Compiler::ParseAndAnalyze(info)) return false;
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
@@ -796,7 +805,7 @@
}
CompilationHandleScope handle_scope(info);
- if (!CompileOptimizedPrologue(info)) return false;
+ if (!Compiler::ParseAndAnalyze(info)) return false;
info->SaveHandles(); // Copy handles to the compilation handle scope.
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
@@ -910,6 +919,8 @@
// TODO(turbofan): In the future, unoptimized code with deopt support could
// be generated lazily once deopt is triggered.
bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
+ DCHECK(info->function() != NULL);
+ DCHECK(info->scope() != NULL);
if (!info->shared_info()->has_deoptimization_support()) {
CompilationInfoWithZone unoptimized(info->shared_info());
// Note that we use the same AST that we will use for generating the
@@ -1096,11 +1107,9 @@
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
- Handle<String> source,
- Handle<Context> context,
- StrictMode strict_mode,
- ParseRestriction restriction,
- int scope_position) {
+ Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, StrictMode strict_mode,
+ ParseRestriction restriction, int scope_position) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_eval_size()->Increment(source_length);
@@ -1108,7 +1117,7 @@
CompilationCache* compilation_cache = isolate->compilation_cache();
MaybeHandle<SharedFunctionInfo> maybe_shared_info =
- compilation_cache->LookupEval(source, context, strict_mode,
+ compilation_cache->LookupEval(source, outer_info, context, strict_mode,
scope_position);
Handle<SharedFunctionInfo> shared_info;
@@ -1135,8 +1144,8 @@
// If caller is strict mode, the result must be in strict mode as well.
DCHECK(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT);
if (!shared_info->dont_cache()) {
- compilation_cache->PutEval(
- source, context, shared_info, scope_position);
+ compilation_cache->PutEval(source, outer_info, context, shared_info,
+ scope_position);
}
}
} else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
@@ -1304,7 +1313,7 @@
Handle<Code> code = isolate->builtins()->CompileLazy();
info.SetCode(code);
scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
- } else if (FullCodeGenerator::MakeCode(&info)) {
+ } else if (Renumber(&info) && FullCodeGenerator::MakeCode(&info)) {
DCHECK(!info.code().is_null());
scope_info = ScopeInfo::Create(info.scope(), info.zone());
} else {
diff --git a/src/compiler.h b/src/compiler.h
index 47579bd..2cacb40 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -675,20 +675,23 @@
MUST_USE_RESULT static MaybeHandle<Code> GetDebugCode(
Handle<JSFunction> function);
+ // Parser::Parse, then Compiler::Analyze.
+ static bool ParseAndAnalyze(CompilationInfo* info);
+ // Rewrite, analyze scopes, and renumber.
+ static bool Analyze(CompilationInfo* info);
+ // Adds deoptimization support, requires ParseAndAnalyze.
+ static bool EnsureDeoptimizationSupport(CompilationInfo* info);
+
static bool EnsureCompiled(Handle<JSFunction> function,
ClearExceptionFlag flag);
- static bool EnsureDeoptimizationSupport(CompilationInfo* info);
-
static void CompileForLiveEdit(Handle<Script> script);
// Compile a String source within a context for eval.
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
- Handle<String> source,
- Handle<Context> context,
- StrictMode strict_mode,
- ParseRestriction restriction,
- int scope_position);
+ Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, StrictMode strict_mode,
+ ParseRestriction restriction, int scope_position);
// Compile a String source within a context.
static Handle<SharedFunctionInfo> CompileScript(
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 051b47c..2c27022 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -59,6 +59,13 @@
// static
+FieldAccess AccessBuilder::ForStringLength() {
+ return {kTaggedBase, String::kLengthOffset, Handle<Name>(),
+ Type::SignedSmall(), kMachAnyTagged};
+}
+
+
+// static
FieldAccess AccessBuilder::ForValue() {
return {kTaggedBase, JSValue::kValueOffset, Handle<Name>(), Type::Any(),
kMachAnyTagged};
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index 73c81ef..4c22efa 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -37,6 +37,9 @@
// Provides access to Map::instance_type() field.
static FieldAccess ForMapInstanceType();
+ // Provides access to String::length() field.
+ static FieldAccess ForStringLength();
+
// Provides access to JSValue::value() field.
static FieldAccess ForValue();
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 95e1bde..56b1bf0 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -1139,8 +1139,12 @@
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::Flag::kNoFlags;
+ return MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kInt32ModIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kUint32ModIsSafe;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index fd062f2..913dba8 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -1300,8 +1300,9 @@
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::Flag::kNoFlags;
+ return MachineOperatorBuilder::kNoFlags;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index b230090..4887260 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -5,10 +5,11 @@
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler.h"
+#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties.h"
#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/full-codegen.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -24,7 +25,8 @@
jsgraph_(jsgraph),
globals_(0, local_zone),
breakable_(NULL),
- execution_context_(NULL) {
+ execution_context_(NULL),
+ loop_assignment_analysis_(NULL) {
InitializeAstVisitor(local_zone);
}
@@ -59,6 +61,12 @@
int parameter_count = info()->num_parameters();
graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
+ if (FLAG_loop_assignment_analysis) {
+ // TODO(turbofan): use a temporary zone for the loop assignment analysis.
+ AstLoopAssignmentAnalyzer analyzer(zone(), info());
+ loop_assignment_analysis_ = analyzer.Analyze();
+ }
+
// Initialize the top-level environment.
Environment env(this, scope, graph()->start());
set_environment(&env);
@@ -579,9 +587,16 @@
}
+BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
+ IterationStatement* stmt) {
+ if (loop_assignment_analysis_ == NULL) return NULL;
+ return loop_assignment_analysis_->GetVariablesAssignedInLoop(stmt);
+}
+
+
void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder while_loop(this);
- while_loop.BeginLoop();
+ while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
VisitIterationBody(stmt, &while_loop, 0);
while_loop.EndBody();
VisitForTest(stmt->cond());
@@ -593,7 +608,7 @@
void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
LoopBuilder while_loop(this);
- while_loop.BeginLoop();
+ while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
while_loop.BreakUnless(condition);
@@ -606,7 +621,7 @@
void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
LoopBuilder for_loop(this);
VisitIfNotNull(stmt->init());
- for_loop.BeginLoop();
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
if (stmt->cond() != NULL) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
@@ -682,7 +697,7 @@
environment()->Push(jsgraph()->ZeroConstant());
// PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
LoopBuilder for_loop(this);
- for_loop.BeginLoop();
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
// Check loop termination condition.
Node* index = environment()->Peek(0);
Node* exit_cond =
@@ -1308,12 +1323,14 @@
// Create node to ask for help resolving potential eval call. This will
// provide a fully resolved callee and the corresponding receiver.
+ Node* function = GetFunctionClosure();
Node* receiver = environment()->Lookup(info()->scope()->receiver());
Node* strict = jsgraph()->Constant(strict_mode());
Node* position = jsgraph()->Constant(info()->scope()->start_position());
const Operator* op =
- javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
- Node* pair = NewNode(op, callee, source, receiver, strict, position);
+ javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+ Node* pair =
+ NewNode(op, callee, source, function, receiver, strict, position);
PrepareFrameState(pair, expr->EvalOrLookupId(),
OutputFrameStateCombine::PokeAt(arg_count + 1));
Node* new_callee = NewNode(common()->Projection(0), pair);
@@ -2113,7 +2130,7 @@
jsgraph()->ZeroConstant());
Node* stack = NewNode(jsgraph()->machine()->LoadStackPointer());
Node* tag = NewNode(jsgraph()->machine()->UintLessThan(), limit, stack);
- stack_check.If(tag);
+ stack_check.If(tag, BranchHint::kTrue);
stack_check.Then();
stack_check.Else();
Node* guard = NewNode(javascript()->CallRuntime(Runtime::kStackGuard, 0));
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 15f9230..7cff07c 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -16,8 +16,9 @@
namespace compiler {
class ControlBuilder;
-class LoopBuilder;
class Graph;
+class LoopAssignmentAnalysis;
+class LoopBuilder;
// The AstGraphBuilder produces a high-level IR graph, based on an
// underlying AST. The produced graph can either be compiled into a
@@ -135,6 +136,8 @@
SetOncePointer<Node> function_closure_;
SetOncePointer<Node> function_context_;
+ LoopAssignmentAnalysis* loop_assignment_analysis_;
+
CompilationInfo* info() const { return info_; }
inline StrictMode strict_mode() const;
JSGraph* jsgraph() { return jsgraph_; }
@@ -188,6 +191,8 @@
OutputFrameStateCombine StateCombineFromAstContext();
+ BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
+
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
};
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
new file mode 100644
index 0000000..7adac56
--- /dev/null
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -0,0 +1,305 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/ast-loop-assignment-analyzer.h"
+#include "src/parser.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef class AstLoopAssignmentAnalyzer ALAA; // for code shortitude.
+
+ALAA::AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info)
+ : info_(info), loop_stack_(zone) {
+ InitializeAstVisitor(zone);
+}
+
+
+LoopAssignmentAnalysis* ALAA::Analyze() {
+ LoopAssignmentAnalysis* a = new (zone()) LoopAssignmentAnalysis(zone());
+ result_ = a;
+ VisitStatements(info()->function()->body());
+ result_ = NULL;
+ return a;
+}
+
+
+void ALAA::Enter(IterationStatement* loop) {
+ int num_variables = 1 + info()->scope()->num_parameters() +
+ info()->scope()->num_stack_slots();
+ BitVector* bits = new (zone()) BitVector(num_variables, zone());
+ loop_stack_.push_back(bits);
+}
+
+
+void ALAA::Exit(IterationStatement* loop) {
+ DCHECK(loop_stack_.size() > 0);
+ BitVector* bits = loop_stack_.back();
+ loop_stack_.pop_back();
+ if (!loop_stack_.empty()) {
+ loop_stack_.back()->Union(*bits);
+ }
+ result_->list_.push_back(
+ std::pair<IterationStatement*, BitVector*>(loop, bits));
+}
+
+
+// ---------------------------------------------------------------------------
+// -- Leaf nodes -------------------------------------------------------------
+// ---------------------------------------------------------------------------
+
+void ALAA::VisitVariableDeclaration(VariableDeclaration* leaf) {}
+void ALAA::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
+void ALAA::VisitModuleDeclaration(ModuleDeclaration* leaf) {}
+void ALAA::VisitImportDeclaration(ImportDeclaration* leaf) {}
+void ALAA::VisitExportDeclaration(ExportDeclaration* leaf) {}
+void ALAA::VisitModuleVariable(ModuleVariable* leaf) {}
+void ALAA::VisitModulePath(ModulePath* leaf) {}
+void ALAA::VisitModuleUrl(ModuleUrl* leaf) {}
+void ALAA::VisitEmptyStatement(EmptyStatement* leaf) {}
+void ALAA::VisitContinueStatement(ContinueStatement* leaf) {}
+void ALAA::VisitBreakStatement(BreakStatement* leaf) {}
+void ALAA::VisitDebuggerStatement(DebuggerStatement* leaf) {}
+void ALAA::VisitFunctionLiteral(FunctionLiteral* leaf) {}
+void ALAA::VisitNativeFunctionLiteral(NativeFunctionLiteral* leaf) {}
+void ALAA::VisitVariableProxy(VariableProxy* leaf) {}
+void ALAA::VisitLiteral(Literal* leaf) {}
+void ALAA::VisitRegExpLiteral(RegExpLiteral* leaf) {}
+void ALAA::VisitThisFunction(ThisFunction* leaf) {}
+void ALAA::VisitSuperReference(SuperReference* leaf) {}
+
+
+// ---------------------------------------------------------------------------
+// -- Pass-through nodes------------------------------------------------------
+// ---------------------------------------------------------------------------
+void ALAA::VisitModuleLiteral(ModuleLiteral* e) { Visit(e->body()); }
+
+
+void ALAA::VisitBlock(Block* stmt) { VisitStatements(stmt->statements()); }
+
+
+void ALAA::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void ALAA::VisitIfStatement(IfStatement* stmt) {
+ Visit(stmt->condition());
+ Visit(stmt->then_statement());
+ Visit(stmt->else_statement());
+}
+
+
+void ALAA::VisitReturnStatement(ReturnStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void ALAA::VisitWithStatement(WithStatement* stmt) {
+ Visit(stmt->expression());
+ Visit(stmt->statement());
+}
+
+
+void ALAA::VisitSwitchStatement(SwitchStatement* stmt) {
+ Visit(stmt->tag());
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ for (int i = 0; i < clauses->length(); i++) {
+ Visit(clauses->at(i));
+ }
+}
+
+
+void ALAA::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ Visit(stmt->try_block());
+ Visit(stmt->finally_block());
+}
+
+
+void ALAA::VisitClassLiteral(ClassLiteral* e) {
+ VisitIfNotNull(e->extends());
+ VisitIfNotNull(e->constructor());
+ ZoneList<ObjectLiteralProperty*>* properties = e->properties();
+ for (int i = 0; i < properties->length(); i++) {
+ Visit(properties->at(i)->value());
+ }
+}
+
+
+void ALAA::VisitConditional(Conditional* e) {
+ Visit(e->condition());
+ Visit(e->then_expression());
+ Visit(e->else_expression());
+}
+
+
+void ALAA::VisitObjectLiteral(ObjectLiteral* e) {
+ ZoneList<ObjectLiteralProperty*>* properties = e->properties();
+ for (int i = 0; i < properties->length(); i++) {
+ Visit(properties->at(i)->value());
+ }
+}
+
+
+void ALAA::VisitArrayLiteral(ArrayLiteral* e) { VisitExpressions(e->values()); }
+
+
+void ALAA::VisitYield(Yield* stmt) {
+ Visit(stmt->generator_object());
+ Visit(stmt->expression());
+}
+
+
+void ALAA::VisitThrow(Throw* stmt) { Visit(stmt->exception()); }
+
+
+void ALAA::VisitProperty(Property* e) {
+ Visit(e->obj());
+ Visit(e->key());
+}
+
+
+void ALAA::VisitCall(Call* e) {
+ Visit(e->expression());
+ VisitExpressions(e->arguments());
+}
+
+
+void ALAA::VisitCallNew(CallNew* e) {
+ Visit(e->expression());
+ VisitExpressions(e->arguments());
+}
+
+
+void ALAA::VisitCallRuntime(CallRuntime* e) {
+ VisitExpressions(e->arguments());
+}
+
+
+void ALAA::VisitUnaryOperation(UnaryOperation* e) { Visit(e->expression()); }
+
+
+void ALAA::VisitBinaryOperation(BinaryOperation* e) {
+ Visit(e->left());
+ Visit(e->right());
+}
+
+
+void ALAA::VisitCompareOperation(CompareOperation* e) {
+ Visit(e->left());
+ Visit(e->right());
+}
+
+
+void ALAA::VisitCaseClause(CaseClause* cc) {
+ if (!cc->is_default()) Visit(cc->label());
+ VisitStatements(cc->statements());
+}
+
+
+// ---------------------------------------------------------------------------
+// -- Interesting nodes-------------------------------------------------------
+// ---------------------------------------------------------------------------
+void ALAA::VisitModuleStatement(ModuleStatement* stmt) {
+ Visit(stmt->body());
+ // TODO(turbofan): can a module appear in a loop?
+ AnalyzeAssignment(stmt->proxy()->var());
+}
+
+
+void ALAA::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Visit(stmt->try_block());
+ Visit(stmt->catch_block());
+ // TODO(turbofan): are catch variables well-scoped?
+ AnalyzeAssignment(stmt->variable());
+}
+
+
+void ALAA::VisitDoWhileStatement(DoWhileStatement* loop) {
+ Enter(loop);
+ Visit(loop->body());
+ Visit(loop->cond());
+ Exit(loop);
+}
+
+
+void ALAA::VisitWhileStatement(WhileStatement* loop) {
+ Enter(loop);
+ Visit(loop->cond());
+ Visit(loop->body());
+ Exit(loop);
+}
+
+
+void ALAA::VisitForStatement(ForStatement* loop) {
+ VisitIfNotNull(loop->init());
+ Enter(loop);
+ VisitIfNotNull(loop->cond());
+ Visit(loop->body());
+ VisitIfNotNull(loop->next());
+ Exit(loop);
+}
+
+
+void ALAA::VisitForInStatement(ForInStatement* loop) {
+ Enter(loop);
+ Visit(loop->each());
+ Visit(loop->subject());
+ Visit(loop->body());
+ Exit(loop);
+}
+
+
+void ALAA::VisitForOfStatement(ForOfStatement* loop) {
+ Enter(loop);
+ Visit(loop->each());
+ Visit(loop->subject());
+ Visit(loop->body());
+ Exit(loop);
+}
+
+
+void ALAA::VisitAssignment(Assignment* stmt) {
+ Expression* l = stmt->target();
+ Visit(l);
+ Visit(stmt->value());
+ if (l->IsVariableProxy()) AnalyzeAssignment(l->AsVariableProxy()->var());
+}
+
+
+void ALAA::VisitCountOperation(CountOperation* e) {
+ Expression* l = e->expression();
+ Visit(l);
+ if (l->IsVariableProxy()) AnalyzeAssignment(l->AsVariableProxy()->var());
+}
+
+
+void ALAA::AnalyzeAssignment(Variable* var) {
+ if (!loop_stack_.empty() && var->IsStackAllocated()) {
+ loop_stack_.back()->Add(GetVariableIndex(info()->scope(), var));
+ }
+}
+
+
+int ALAA::GetVariableIndex(Scope* scope, Variable* var) {
+ CHECK(var->IsStackAllocated());
+ if (var->is_this()) return 0;
+ if (var->IsParameter()) return 1 + var->index();
+ return 1 + scope->num_parameters() + var->index();
+}
+
+
+int LoopAssignmentAnalysis::GetAssignmentCountForTesting(Scope* scope,
+ Variable* var) {
+ int count = 0;
+ int var_index = AstLoopAssignmentAnalyzer::GetVariableIndex(scope, var);
+ for (size_t i = 0; i < list_.size(); i++) {
+ if (list_[i].second->Contains(var_index)) count++;
+ }
+ return count;
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/src/compiler/ast-loop-assignment-analyzer.h b/src/compiler/ast-loop-assignment-analyzer.h
new file mode 100644
index 0000000..daa94f9
--- /dev/null
+++ b/src/compiler/ast-loop-assignment-analyzer.h
@@ -0,0 +1,78 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
+#define V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
+
+#include "src/ast.h"
+#include "src/data-flow.h"
+#include "src/v8.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Variable;
+class Scope;
+
+namespace compiler {
+
+// The result of analyzing loop assignments.
+class LoopAssignmentAnalysis : public ZoneObject {
+ public:
+ BitVector* GetVariablesAssignedInLoop(IterationStatement* loop) {
+ for (size_t i = 0; i < list_.size(); i++) {
+ // TODO(turbofan): hashmap or binary search for loop assignments.
+ if (list_[i].first == loop) return list_[i].second;
+ }
+ UNREACHABLE(); // should never ask for loops that aren't here!
+ return NULL;
+ }
+
+ int GetAssignmentCountForTesting(Scope* scope, Variable* var);
+
+ private:
+ friend class AstLoopAssignmentAnalyzer;
+ explicit LoopAssignmentAnalysis(Zone* zone) : list_(zone) {}
+ ZoneVector<std::pair<IterationStatement*, BitVector*>> list_;
+};
+
+
+// The class that performs loop assignment analysis by walking the AST.
+class AstLoopAssignmentAnalyzer : public AstVisitor {
+ public:
+ AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info);
+
+ LoopAssignmentAnalysis* Analyze();
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ static int GetVariableIndex(Scope* scope, Variable* var);
+
+ private:
+ CompilationInfo* info_;
+ ZoneDeque<BitVector*> loop_stack_;
+ LoopAssignmentAnalysis* result_;
+
+ CompilationInfo* info() { return info_; }
+
+ void Enter(IterationStatement* loop);
+ void Exit(IterationStatement* loop);
+
+ void VisitIfNotNull(AstNode* node) {
+ if (node != NULL) Visit(node);
+ }
+
+ void AnalyzeAssignment(Variable* var);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+ DISALLOW_COPY_AND_ASSIGN(AstLoopAssignmentAnalyzer);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
index 3b7d05b..be24a88 100644
--- a/src/compiler/control-builders.cc
+++ b/src/compiler/control-builders.cc
@@ -9,8 +9,8 @@
namespace compiler {
-void IfBuilder::If(Node* condition) {
- builder_->NewBranch(condition);
+void IfBuilder::If(Node* condition, BranchHint hint) {
+ builder_->NewBranch(condition, hint);
else_environment_ = environment()->CopyForConditional();
}
@@ -32,9 +32,9 @@
}
-void LoopBuilder::BeginLoop() {
+void LoopBuilder::BeginLoop(BitVector* assigned) {
builder_->NewLoop();
- loop_environment_ = environment()->CopyForLoop();
+ loop_environment_ = environment()->CopyForLoop(assigned);
continue_environment_ = environment()->CopyAsUnreachable();
break_environment_ = environment()->CopyAsUnreachable();
}
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
index eb674bf..4b5fc3a 100644
--- a/src/compiler/control-builders.h
+++ b/src/compiler/control-builders.h
@@ -14,7 +14,6 @@
namespace internal {
namespace compiler {
-
// Base class for all control builders. Also provides a common interface for
// control builders to handle 'break' and 'continue' statements when they are
// used to model breakable statements.
@@ -49,7 +48,7 @@
else_environment_(NULL) {}
// Primitive control commands.
- void If(Node* condition);
+ void If(Node* condition, BranchHint hint = BranchHint::kNone);
void Then();
void Else();
void End();
@@ -70,7 +69,7 @@
break_environment_(NULL) {}
// Primitive control commands.
- void BeginLoop();
+ void BeginLoop(BitVector* assigned);
void EndBody();
void EndLoop();
diff --git a/src/compiler/control-reducer.cc b/src/compiler/control-reducer.cc
index e1bd0c9..533fb23 100644
--- a/src/compiler/control-reducer.cc
+++ b/src/compiler/control-reducer.cc
@@ -18,7 +18,7 @@
enum Reachability { kFromStart = 8 };
#define TRACE(x) \
- if (FLAG_trace_turbo) PrintF x
+ if (FLAG_trace_turbo_reduction) PrintF x
class ControlReducerImpl {
public:
diff --git a/src/compiler/graph-builder.cc b/src/compiler/graph-builder.cc
index ae55b95..e10efb1 100644
--- a/src/compiler/graph-builder.cc
+++ b/src/compiler/graph-builder.cc
@@ -166,11 +166,22 @@
}
-void StructuredGraphBuilder::Environment::PrepareForLoop() {
+void StructuredGraphBuilder::Environment::PrepareForLoop(BitVector* assigned) {
Node* control = GetControlDependency();
- for (int i = 0; i < static_cast<int>(values()->size()); ++i) {
- Node* phi = builder_->NewPhi(1, values()->at(i), control);
- values()->at(i) = phi;
+ int size = static_cast<int>(values()->size());
+ if (assigned == NULL) {
+ // Assume that everything is updated in the loop.
+ for (int i = 0; i < size; ++i) {
+ Node* phi = builder_->NewPhi(1, values()->at(i), control);
+ values()->at(i) = phi;
+ }
+ } else {
+ // Only build phis for those locals assigned in this loop.
+ for (int i = 0; i < size; ++i) {
+ if (i < assigned->length() && !assigned->Contains(i)) continue;
+ Node* phi = builder_->NewPhi(1, values()->at(i), control);
+ values()->at(i) = phi;
+ }
}
Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
UpdateEffectDependency(effect);
diff --git a/src/compiler/graph-builder.h b/src/compiler/graph-builder.h
index 90df6ca..0fdd769 100644
--- a/src/compiler/graph-builder.h
+++ b/src/compiler/graph-builder.h
@@ -14,6 +14,9 @@
namespace v8 {
namespace internal {
+
+class BitVector;
+
namespace compiler {
class Node;
@@ -99,8 +102,8 @@
Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition) {
- return NewNode(common()->Branch(), condition);
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
}
protected:
@@ -215,8 +218,8 @@
}
// Copies this environment at a loop header control-flow point.
- Environment* CopyForLoop() {
- PrepareForLoop();
+ Environment* CopyForLoop(BitVector* assigned) {
+ PrepareForLoop(assigned);
return builder()->CopyEnvironment(this);
}
@@ -230,7 +233,7 @@
NodeVector* values() { return &values_; }
// Prepare environment to be used as loop header.
- void PrepareForLoop();
+ void PrepareForLoop(BitVector* assigned);
private:
StructuredGraphBuilder* builder_;
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index ca4bf1b..e1a9cb7 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -881,8 +881,9 @@
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::Flag::kNoFlags;
+ return MachineOperatorBuilder::kNoFlags;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index bab3308..58d4904 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -59,17 +59,6 @@
}
-// TODO(sigurds) Find a home for this function and reuse it everywhere (esp. in
-// test cases, where similar code is currently duplicated).
-static void Parse(Handle<JSFunction> function, CompilationInfoWithZone* info) {
- CHECK(Parser::Parse(info));
- CHECK(Rewriter::Rewrite(info));
- CHECK(Scope::Analyze(info));
- CHECK(AstNumbering::Renumber(info->function(), info->zone()));
- CHECK(Compiler::EnsureDeoptimizationSupport(info));
-}
-
-
// A facade on a JSFunction's graph to facilitate inlining. It assumes the
// that the function graph has only one return statement, and provides
// {UnifyReturn} to convert a function graph to that end.
@@ -385,7 +374,9 @@
}
CompilationInfoWithZone info(function);
- Parse(function, &info);
+ // TODO(wingo): ParseAndAnalyze can fail due to stack overflow.
+ CHECK(Compiler::ParseAndAnalyze(&info));
+ CHECK(Compiler::EnsureDeoptimizationSupport(&info));
if (info.scope()->arguments() != NULL && info.strict_mode() != STRICT) {
// For now do not inline functions that use their arguments array.
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index fb510e0..e4d25bb 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -546,7 +546,48 @@
Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
return ReplaceWith(inv);
}
- // TODO(turbofan): js-typed-lowering of ToBoolean(string)
+ if (input_type->Is(Type::String())) {
+ // JSToBoolean(x:string) => BooleanNot(NumberEqual(x.length, #0))
+ FieldAccess access = AccessBuilder::ForStringLength();
+ Node* length = graph()->NewNode(simplified()->LoadField(access), input,
+ graph()->start(), graph()->start());
+ Node* cmp = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
+ return ReplaceWith(inv);
+ }
+ // TODO(turbofan): We need some kinda of PrimitiveToBoolean simplified
+ // operator, then we can do the pushing in the SimplifiedOperatorReducer
+ // and do not need to protect against stack overflow (because of backedges
+ // in phis) below.
+ if (input->opcode() == IrOpcode::kPhi &&
+ input_type->Is(
+ Type::Union(Type::Boolean(), Type::OrderedNumber(), zone()))) {
+ // JSToBoolean(phi(x1,...,xn):ordered-number|boolean)
+ // => phi(JSToBoolean(x1),...,JSToBoolean(xn))
+ int input_count = input->InputCount() - 1;
+ Node** inputs = zone()->NewArray<Node*>(input_count + 1);
+ for (int i = 0; i < input_count; ++i) {
+ Node* value = input->InputAt(i);
+ Type* value_type = NodeProperties::GetBounds(value).upper;
+ // Recursively try to reduce the value first.
+ Reduction result = (value_type->Is(Type::Boolean()) ||
+ value_type->Is(Type::OrderedNumber()))
+ ? ReduceJSToBooleanInput(value)
+ : NoChange();
+ if (result.Changed()) {
+ inputs[i] = result.replacement();
+ } else {
+ inputs[i] = graph()->NewNode(javascript()->ToBoolean(), value,
+ jsgraph()->ZeroConstant(),
+ graph()->start(), graph()->start());
+ }
+ }
+ inputs[input_count] = input->InputAt(input_count);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, input_count),
+ input_count + 1, inputs);
+ return ReplaceWith(phi);
+ }
return NoChange();
}
@@ -565,21 +606,18 @@
Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
ExternalArrayType type = array->type();
- uint32_t byte_length;
- if (array->byte_length()->ToUint32(&byte_length) &&
- byte_length <= static_cast<uint32_t>(kMaxInt)) {
+ double byte_length = array->byte_length()->Number();
+ if (byte_length <= kMaxInt) {
Handle<ExternalArray> elements =
Handle<ExternalArray>::cast(handle(array->elements()));
Node* pointer = jsgraph()->IntPtrConstant(
bit_cast<intptr_t>(elements->external_pointer()));
- Node* length = jsgraph()->Uint32Constant(
- static_cast<uint32_t>(byte_length / array->element_size()));
+ Node* length = jsgraph()->Constant(byte_length / array->element_size());
Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
Node* load = graph()->NewNode(
simplified()->LoadElement(
AccessBuilder::ForTypedArrayElement(type, true)),
- pointer, key, length, effect, control);
+ pointer, key, length, effect);
return ReplaceEagerly(node, load);
}
}
@@ -603,15 +641,13 @@
Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
ExternalArrayType type = array->type();
- uint32_t byte_length;
- if (array->byte_length()->ToUint32(&byte_length) &&
- byte_length <= static_cast<uint32_t>(kMaxInt)) {
+ double byte_length = array->byte_length()->Number();
+ if (byte_length <= kMaxInt) {
Handle<ExternalArray> elements =
Handle<ExternalArray>::cast(handle(array->elements()));
Node* pointer = jsgraph()->IntPtrConstant(
bit_cast<intptr_t>(elements->external_pointer()));
- Node* length = jsgraph()->Uint32Constant(
- static_cast<uint32_t>(byte_length / array->element_size()));
+ Node* length = jsgraph()->Constant(byte_length / array->element_size());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* store = graph()->NewNode(
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index f285b8a..a40da13 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -109,6 +109,15 @@
return ReplaceInt32(m.left().Value() & m.right().Value());
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) { // (x & K) & K => x & K
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(
+ 1, Int32Constant(m.right().Value() & mleft.right().Value()));
+ return Changed(node);
+ }
+ }
break;
}
case IrOpcode::kWord32Or: {
@@ -546,6 +555,7 @@
node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, m.left().node());
+ node->TrimInputCount(2);
return Changed(node);
}
if (m.right().HasValue()) {
@@ -567,6 +577,7 @@
node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, quotient);
+ node->TrimInputCount(2);
return Changed(node);
}
return Replace(quotient);
@@ -635,6 +646,7 @@
node->set_op(machine()->Int32Sub());
DCHECK_EQ(dividend, node->InputAt(0));
node->ReplaceInput(1, Int32Mul(quotient, Int32Constant(divisor)));
+ node->TrimInputCount(2);
return Changed(node);
}
}
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 2c87189..f717729 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -60,17 +60,21 @@
public:
// Flags that specify which operations are available. This is useful
// for operations that are unsupported by some back-ends.
- enum class Flag : unsigned {
- kNoFlags = 0,
- kFloat64Floor = 1 << 0,
- kFloat64Ceil = 1 << 1,
- kFloat64RoundTruncate = 1 << 2,
- kFloat64RoundTiesAway = 1 << 3
+ enum Flag {
+ kNoFlags = 0u,
+ kFloat64Floor = 1u << 0,
+ kFloat64Ceil = 1u << 1,
+ kFloat64RoundTruncate = 1u << 2,
+ kFloat64RoundTiesAway = 1u << 3,
+ kInt32DivIsSafe = 1u << 4,
+ kInt32ModIsSafe = 1u << 5,
+ kUint32DivIsSafe = 1u << 6,
+ kUint32ModIsSafe = 1u << 7
};
typedef base::Flags<Flag, unsigned> Flags;
explicit MachineOperatorBuilder(MachineType word = kMachPtr,
- Flags supportedOperators = Flag::kNoFlags);
+ Flags supportedOperators = kNoFlags);
const Operator* Word32And();
const Operator* Word32Or();
@@ -104,6 +108,10 @@
const Operator* Uint32LessThan();
const Operator* Uint32LessThanOrEqual();
const Operator* Uint32Mod();
+ bool Int32DivIsSafe() const { return flags_ & kInt32DivIsSafe; }
+ bool Int32ModIsSafe() const { return flags_ & kInt32ModIsSafe; }
+ bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
+ bool Uint32ModIsSafe() const { return flags_ & kUint32ModIsSafe; }
const Operator* Int64Add();
const Operator* Int64Sub();
@@ -153,14 +161,10 @@
const Operator* Float64Ceil();
const Operator* Float64RoundTruncate();
const Operator* Float64RoundTiesAway();
- bool HasFloat64Floor() { return flags_ & Flag::kFloat64Floor; }
- bool HasFloat64Ceil() { return flags_ & Flag::kFloat64Ceil; }
- bool HasFloat64RoundTruncate() {
- return flags_ & Flag::kFloat64RoundTruncate;
- }
- bool HasFloat64RoundTiesAway() {
- return flags_ & Flag::kFloat64RoundTiesAway;
- }
+ bool HasFloat64Floor() { return flags_ & kFloat64Floor; }
+ bool HasFloat64Ceil() { return flags_ & kFloat64Ceil; }
+ bool HasFloat64RoundTruncate() { return flags_ & kFloat64RoundTruncate; }
+ bool HasFloat64RoundTiesAway() { return flags_ & kFloat64RoundTiesAway; }
// load [base + index]
const Operator* Load(LoadRepresentation rep);
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index dc1749a..9be6bb6 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -651,8 +651,12 @@
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::Flag::kNoFlags;
+ return MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kInt32ModIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kUint32ModIsSafe;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index 4cc4ff7..5803a00 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -96,42 +96,6 @@
typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher;
-// A pattern matcher for any numberic constant.
-struct NumericValueMatcher : public NodeMatcher {
- explicit NumericValueMatcher(Node* const node) : NodeMatcher(node) {
- switch (opcode()) {
- case IrOpcode::kInt32Constant:
- has_value_ = true;
- value_ = OpParameter<int32_t>(node);
- break;
- case IrOpcode::kFloat32Constant:
- has_value_ = true;
- value_ = OpParameter<float>(node);
- break;
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kNumberConstant:
- has_value_ = true;
- value_ = OpParameter<double>(node);
- break;
- default:
- has_value_ = false;
- value_ = 0; // Make the compiler happy.
- break;
- }
- }
-
- bool HasValue() const { return has_value_; }
- double Value() const {
- DCHECK(HasValue());
- return value_;
- }
-
- private:
- double value_;
- bool has_value_;
-};
-
-
// A pattern matcher for heap object constants.
template <typename T>
struct HeapObjectMatcher FINAL
@@ -175,6 +139,7 @@
typedef BinopMatcher<Int64Matcher, Int64Matcher> Int64BinopMatcher;
typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher;
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
+typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
// Fairly intel-specify node matcher used for matching scale factors in
diff --git a/src/compiler/operator-properties-inl.h b/src/compiler/operator-properties-inl.h
index 7c2ae16..0488b1b 100644
--- a/src/compiler/operator-properties-inl.h
+++ b/src/compiler/operator-properties-inl.h
@@ -122,8 +122,11 @@
case IrOpcode::kPhi:
case IrOpcode::kEffectPhi:
case IrOpcode::kLoad:
- case IrOpcode::kLoadElement:
case IrOpcode::kLoadField:
+ case IrOpcode::kInt32Div:
+ case IrOpcode::kInt32Mod:
+ case IrOpcode::kUint32Div:
+ case IrOpcode::kUint32Mod:
return 1;
#define OPCODE_CASE(x) case IrOpcode::k##x:
CONTROL_OP_LIST(OPCODE_CASE)
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 346e072..d9e67ee 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -35,36 +35,29 @@
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
schedule_queue_(zone),
- node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
- has_floating_control_(false) {}
+ node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
Schedule* Scheduler::ComputeSchedule(ZonePool* zone_pool, Graph* graph) {
- Schedule* schedule;
- bool had_floating_control = false;
- do {
- ZonePool::Scope zone_scope(zone_pool);
- schedule = new (graph->zone())
- Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
- Scheduler scheduler(zone_scope.zone(), graph, schedule);
+ ZonePool::Scope zone_scope(zone_pool);
+ Schedule* schedule = new (graph->zone())
+ Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
+ Scheduler scheduler(zone_scope.zone(), graph, schedule);
- scheduler.BuildCFG();
- scheduler.ComputeSpecialRPONumbering();
- scheduler.GenerateImmediateDominatorTree();
+ scheduler.BuildCFG();
+ scheduler.ComputeSpecialRPONumbering();
+ scheduler.GenerateImmediateDominatorTree();
- scheduler.PrepareUses();
- scheduler.ScheduleEarly();
- scheduler.ScheduleLate();
-
- had_floating_control = scheduler.ConnectFloatingControl();
- } while (had_floating_control);
+ scheduler.PrepareUses();
+ scheduler.ScheduleEarly();
+ scheduler.ScheduleLate();
return schedule;
}
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
- SchedulerData def = {NULL, 0, false, false, kUnknown};
+ SchedulerData def = {schedule_->start(), 0, false, false, kUnknown};
return def;
}
@@ -92,19 +85,18 @@
break;
}
#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
- CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
+ CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
#undef DEFINE_FLOATING_CONTROL_CASE
- {
- // Control nodes that were not control-reachable from end may float.
- data->placement_ = kSchedulable;
- if (!data->is_connected_control_) {
- data->is_floating_control_ = true;
- has_floating_control_ = true;
- Trace("Floating control found: #%d:%s\n", node->id(),
- node->op()->mnemonic());
- }
- break;
+ {
+ // Control nodes that were not control-reachable from end may float.
+ data->placement_ = kSchedulable;
+ if (!data->is_connected_control_) {
+ data->is_floating_control_ = true;
+ Trace("Floating control found: #%d:%s\n", node->id(),
+ node->op()->mnemonic());
}
+ break;
+ }
default:
data->placement_ = kSchedulable;
break;
@@ -114,6 +106,60 @@
}
+void Scheduler::UpdatePlacement(Node* node, Placement placement) {
+ SchedulerData* data = GetData(node);
+ if (data->placement_ != kUnknown) { // Trap on mutation, not initialization.
+ switch (node->opcode()) {
+ case IrOpcode::kParameter:
+ // Parameters are fixed once and for all.
+ UNREACHABLE();
+ break;
+ case IrOpcode::kPhi:
+ case IrOpcode::kEffectPhi: {
+ // Phis and effect phis are coupled to their respective blocks.
+ DCHECK_EQ(Scheduler::kCoupled, data->placement_);
+ DCHECK_EQ(Scheduler::kFixed, placement);
+ Node* control = NodeProperties::GetControlInput(node);
+ BasicBlock* block = schedule_->block(control);
+ schedule_->AddNode(block, node);
+ // TODO(mstarzinger): Cheap hack to make sure unscheduled use count of
+ // control does not drop below zero. This might cause the control to be
+ // queued for scheduling more than once, which makes this ugly!
+ ++(GetData(control)->unscheduled_count_);
+ break;
+ }
+#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
+ CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
+#undef DEFINE_FLOATING_CONTROL_CASE
+ {
+ // Control nodes force coupled uses to be placed.
+ Node::Uses uses = node->uses();
+ for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+ if (GetPlacement(*i) == Scheduler::kCoupled) {
+ DCHECK_EQ(node, NodeProperties::GetControlInput(*i));
+ UpdatePlacement(*i, placement);
+ }
+ }
+ break;
+ }
+ default:
+ DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+ DCHECK_EQ(Scheduler::kScheduled, placement);
+ break;
+ }
+ // Reduce the use count of the node's inputs to potentially make them
+ // schedulable. If all the uses of a node have been scheduled, then the node
+ // itself can be scheduled.
+ for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+ // TODO(mstarzinger): Another cheap hack for use counts.
+ if (GetData(*i)->placement_ == kFixed) continue;
+ DecrementUnscheduledUseCount(*i, i.edge().from());
+ }
+ }
+ data->placement_ = placement;
+}
+
+
void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) {
if (GetPlacement(node) == kCoupled) {
// Use count for coupled nodes is summed up on their control.
@@ -177,21 +223,19 @@
// Internal class to build a control flow graph (i.e the basic blocks and edges
-// between them within a Schedule) from the node graph.
-// Visits the control edges of the graph backwards from end in order to find
-// the connected control subgraph, needed for scheduling.
+// between them within a Schedule) from the node graph. Visits control edges of
+// the graph backwards from an end node in order to find the connected control
+// subgraph, needed for scheduling.
class CFGBuilder {
public:
- Scheduler* scheduler_;
- Schedule* schedule_;
- ZoneQueue<Node*> queue_;
- NodeVector control_;
-
CFGBuilder(Zone* zone, Scheduler* scheduler)
: scheduler_(scheduler),
schedule_(scheduler->schedule_),
queue_(zone),
- control_(zone) {}
+ control_(zone),
+ component_head_(NULL),
+ component_start_(NULL),
+ component_end_(NULL) {}
// Run the control flow graph construction algorithm by walking the graph
// backwards from end through control edges, building and connecting the
@@ -217,10 +261,40 @@
FixNode(schedule_->end(), graph->end());
}
+ // Run the control flow graph construction for a minimal control-connected
+ // component ending in {node} and merge that component into an existing
+ // control flow graph at the bottom of {block}.
+ void Run(BasicBlock* block, Node* node) {
+ Queue(node);
+
+ component_start_ = block;
+ component_end_ = schedule_->block(node);
+ while (!queue_.empty()) { // Breadth-first backwards traversal.
+ Node* node = queue_.front();
+ queue_.pop();
+ bool is_dom = true;
+ int max = NodeProperties::PastControlIndex(node);
+ for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+ is_dom = is_dom &&
+ !scheduler_->GetData(node->InputAt(i))->is_floating_control_;
+ Queue(node->InputAt(i));
+ }
+ // TODO(mstarzinger): This is a hacky way to find component dominator.
+ if (is_dom) component_head_ = node;
+ }
+ DCHECK_NOT_NULL(component_head_);
+
+ for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
+ scheduler_->GetData(*i)->is_floating_control_ = false;
+ ConnectBlocks(*i); // Connect block to its predecessor/successors.
+ }
+ }
+
+ private:
void FixNode(BasicBlock* block, Node* node) {
schedule_->AddNode(block, node);
scheduler_->GetData(node)->is_connected_control_ = true;
- scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
}
void Queue(Node* node) {
@@ -257,11 +331,11 @@
ConnectMerge(node);
break;
case IrOpcode::kBranch:
- scheduler_->schedule_root_nodes_.push_back(node);
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectBranch(node);
break;
case IrOpcode::kReturn:
- scheduler_->schedule_root_nodes_.push_back(node);
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectReturn(node);
break;
default:
@@ -318,17 +392,10 @@
}
void ConnectBranch(Node* branch) {
- Node* branch_block_node = NodeProperties::GetControlInput(branch);
- BasicBlock* branch_block = schedule_->block(branch_block_node);
- DCHECK(branch_block != NULL);
-
BasicBlock* successor_blocks[2];
CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
IrOpcode::kIfFalse);
- TraceConnect(branch, branch_block, successor_blocks[0]);
- TraceConnect(branch, branch_block, successor_blocks[1]);
-
// Consider branch hints.
// TODO(turbofan): Propagate the deferred flag to all blocks dominated by
// this IfTrue/IfFalse later.
@@ -343,8 +410,21 @@
break;
}
- schedule_->AddBranch(branch_block, branch, successor_blocks[0],
- successor_blocks[1]);
+ if (branch == component_head_) {
+ TraceConnect(branch, component_start_, successor_blocks[0]);
+ TraceConnect(branch, component_start_, successor_blocks[1]);
+ schedule_->InsertBranch(component_start_, component_end_, branch,
+ successor_blocks[0], successor_blocks[1]);
+ } else {
+ Node* branch_block_node = NodeProperties::GetControlInput(branch);
+ BasicBlock* branch_block = schedule_->block(branch_block_node);
+ DCHECK(branch_block != NULL);
+
+ TraceConnect(branch, branch_block, successor_blocks[0]);
+ TraceConnect(branch, branch_block, successor_blocks[1]);
+ schedule_->AddBranch(branch_block, branch, successor_blocks[0],
+ successor_blocks[1]);
+ }
}
void ConnectMerge(Node* merge) {
@@ -385,13 +465,25 @@
return (node->opcode() == IrOpcode::kMerge &&
node == scheduler_->graph_->end()->InputAt(0));
}
+
+ Scheduler* scheduler_;
+ Schedule* schedule_;
+ ZoneQueue<Node*> queue_;
+ NodeVector control_;
+ Node* component_head_;
+ BasicBlock* component_start_;
+ BasicBlock* component_end_;
};
void Scheduler::BuildCFG() {
Trace("--- CREATING CFG -------------------------------------------\n");
+
+ // Build a control-flow graph for the main control-connected component that
+ // is being spanned by the graph's start and end nodes.
CFGBuilder cfg_builder(zone_, this);
cfg_builder.Run();
+
// Initialize per-block data.
scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
}
@@ -867,7 +959,7 @@
DCHECK(current_pred != end);
BasicBlock* dominator = *current_pred;
++current_pred;
- // For multiple predecessors, walk up the rpo ordering until a common
+ // For multiple predecessors, walk up the RPO ordering until a common
// dominator is found.
int current_rpo_pos = GetRPONumber(current_rpo);
while (current_pred != end) {
@@ -952,76 +1044,92 @@
// Phase 4: Schedule nodes early.
-class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
+class ScheduleEarlyNodeVisitor {
public:
- explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
- : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
+ ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
+ : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
- GenericGraphVisit::Control Pre(Node* node) {
- Scheduler::SchedulerData* data = scheduler_->GetData(node);
- if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
- // Fixed nodes already know their schedule early position.
- if (data->minimum_block_ == NULL) {
- data->minimum_block_ = schedule_->block(node);
- Trace("Preschedule #%d:%s minimum_rpo = %d (fixed)\n", node->id(),
- node->op()->mnemonic(), data->minimum_block_->rpo_number());
- }
- } else {
- // For unfixed nodes the minimum RPO is the max of all of the inputs.
- if (data->minimum_block_ == NULL) {
- data->minimum_block_ = ComputeMaximumInputRPO(node);
- if (data->minimum_block_ == NULL) return GenericGraphVisit::REENTER;
- Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
- node->op()->mnemonic(), data->minimum_block_->rpo_number());
+ // Run the schedule early algorithm on a set of fixed root nodes.
+ void Run(NodeVector* roots) {
+ for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
+ queue_.push(*i);
+ while (!queue_.empty()) {
+ VisitNode(queue_.front());
+ queue_.pop();
}
}
- DCHECK_NE(data->minimum_block_, NULL);
- return GenericGraphVisit::CONTINUE;
- }
-
- GenericGraphVisit::Control Post(Node* node) {
- Scheduler::SchedulerData* data = scheduler_->GetData(node);
- if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
- // For unfixed nodes the minimum RPO is the max of all of the inputs.
- if (data->minimum_block_ == NULL) {
- data->minimum_block_ = ComputeMaximumInputRPO(node);
- Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
- node->op()->mnemonic(), data->minimum_block_->rpo_number());
- }
- }
- DCHECK_NE(data->minimum_block_, NULL);
- return GenericGraphVisit::CONTINUE;
- }
-
- // Computes the maximum of the minimum RPOs for all inputs. If the maximum
- // cannot be determined (i.e. minimum RPO for at least one input is {NULL}),
- // then {NULL} is returned.
- BasicBlock* ComputeMaximumInputRPO(Node* node) {
- BasicBlock* max_block = schedule_->start();
- for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
- DCHECK_NE(node, *i); // Loops only exist for fixed nodes.
- BasicBlock* block = scheduler_->GetData(*i)->minimum_block_;
- if (block == NULL) return NULL;
- if (block->rpo_number() > max_block->rpo_number()) {
- max_block = block;
- }
- }
- return max_block;
}
private:
+ // Visits one node from the queue and propagates its current schedule early
+ // position to all uses. This in turn might push more nodes onto the queue.
+ void VisitNode(Node* node) {
+ Scheduler::SchedulerData* data = scheduler_->GetData(node);
+
+ // Fixed nodes already know their schedule early position.
+ if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+ DCHECK_EQ(schedule_->start(), data->minimum_block_);
+ data->minimum_block_ = schedule_->block(node);
+ Trace("Fixing #%d:%s minimum_rpo = %d\n", node->id(),
+ node->op()->mnemonic(), data->minimum_block_->rpo_number());
+ }
+
+ // No need to propagate unconstrained schedule early positions.
+ if (data->minimum_block_ == schedule_->start()) return;
+
+ // Propagate schedule early position.
+ DCHECK(data->minimum_block_ != NULL);
+ Node::Uses uses = node->uses();
+ for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+ PropagateMinimumRPOToNode(data->minimum_block_, *i);
+ }
+ }
+
+ // Propagates {block} as another minimum RPO placement into the given {node}.
+ // This has the net effect of computing the maximum of the minimum RPOs for
+ // all inputs to {node} when the queue has been fully processed.
+ void PropagateMinimumRPOToNode(BasicBlock* block, Node* node) {
+ Scheduler::SchedulerData* data = scheduler_->GetData(node);
+
+ // No need to propagate to fixed node, it's guaranteed to be a root.
+ if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
+
+ // Coupled nodes influence schedule early position of their control.
+ if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
+ Node* control = NodeProperties::GetControlInput(node);
+ PropagateMinimumRPOToNode(block, control);
+ }
+
+ // Propagate new position if it is larger than the current.
+ if (block->rpo_number() > data->minimum_block_->rpo_number()) {
+ data->minimum_block_ = block;
+ queue_.push(node);
+ Trace("Propagating #%d:%s minimum_rpo = %d\n", node->id(),
+ node->op()->mnemonic(), data->minimum_block_->rpo_number());
+ }
+ }
+
Scheduler* scheduler_;
Schedule* schedule_;
+ ZoneQueue<Node*> queue_;
};
void Scheduler::ScheduleEarly() {
Trace("--- SCHEDULE EARLY -----------------------------------------\n");
+ if (FLAG_trace_turbo_scheduler) {
+ Trace("roots: ");
+ for (NodeVectorIter i = schedule_root_nodes_.begin();
+ i != schedule_root_nodes_.end(); ++i) {
+ Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+ }
+ Trace("\n");
+ }
// Compute the minimum RPO for each node thereby determining the earliest
// position each node could be placed within a valid schedule.
- ScheduleEarlyNodeVisitor visitor(this);
- graph_->VisitNodeInputsFromEnd(&visitor);
+ ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
+ schedule_early_visitor.Run(&schedule_root_nodes_);
}
@@ -1075,6 +1183,7 @@
// Determine the dominating block for all of the uses of this node. It is
// the latest block that this node can be scheduled in.
+ Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
BasicBlock* block = GetCommonDominatorOfUses(node);
DCHECK_NOT_NULL(block);
@@ -1095,10 +1204,14 @@
hoist_block = GetPreHeader(hoist_block);
}
- ScheduleNode(block, node);
+ // Schedule the node or a floating control structure.
+ if (NodeProperties::IsControl(node)) {
+ ScheduleFloatingControl(block, node);
+ } else {
+ ScheduleNode(block, node);
+ }
}
- private:
BasicBlock* GetPreHeader(BasicBlock* block) {
if (block->IsLoopHeader()) {
return block->dominator();
@@ -1129,7 +1242,7 @@
// If the use is from a coupled (i.e. floating) phi, compute the common
// dominator of its uses. This will not recurse more than one level.
if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
- Trace(" inspecting uses of coupled phi #%d:%s\n", use->id(),
+ Trace(" inspecting uses of coupled #%d:%s\n", use->id(),
use->op()->mnemonic());
DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
return GetCommonDominatorOfUses(use);
@@ -1152,16 +1265,15 @@
return result;
}
+ void ScheduleFloatingControl(BasicBlock* block, Node* node) {
+ DCHECK(scheduler_->GetData(node)->is_floating_control_);
+ scheduler_->FuseFloatingControl(block, node);
+ }
+
void ScheduleNode(BasicBlock* block, Node* node) {
schedule_->PlanNode(block, node);
scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
-
- // Reduce the use count of the node's inputs to potentially make them
- // schedulable. If all the uses of a node have been scheduled, then the node
- // itself can be scheduled.
- for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
- scheduler_->DecrementUnscheduledUseCount(*i, i.edge().from());
- }
+ scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
}
Scheduler* scheduler_;
@@ -1199,97 +1311,53 @@
// -----------------------------------------------------------------------------
-bool Scheduler::ConnectFloatingControl() {
- if (!has_floating_control_) return false;
-
- Trace("Connecting floating control...\n");
-
- // Process blocks and instructions backwards to find and connect floating
- // control nodes into the control graph according to the block they were
- // scheduled into.
- int max = static_cast<int>(schedule_->rpo_order()->size());
- for (int i = max - 1; i >= 0; i--) {
- BasicBlock* block = schedule_->rpo_order()->at(i);
- // TODO(titzer): we place at most one floating control structure per
- // basic block because scheduling currently can interleave phis from
- // one subgraph with the merges from another subgraph.
- for (size_t j = 0; j < block->NodeCount(); j++) {
- Node* node = block->NodeAt(block->NodeCount() - 1 - j);
- SchedulerData* data = GetData(node);
- if (data->is_floating_control_ && !data->is_connected_control_) {
- Trace(" Floating control #%d:%s was scheduled in B%d\n", node->id(),
- node->op()->mnemonic(), block->id().ToInt());
- ConnectFloatingControlSubgraph(block, node);
- break;
- }
- }
+void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
+ Trace("--- FUSE FLOATING CONTROL ----------------------------------\n");
+ if (FLAG_trace_turbo_scheduler) {
+ OFStream os(stdout);
+ os << "Schedule before control flow fusion:\n" << *schedule_;
}
- return true;
+ // Iterate on phase 1: Build control-flow graph.
+ CFGBuilder cfg_builder(zone_, this);
+ cfg_builder.Run(block, node);
+
+ // Iterate on phase 2: Compute special RPO and dominator tree.
+ // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
+ BasicBlockVector* rpo = schedule_->rpo_order();
+ for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
+ BasicBlock* block = *i;
+ block->set_rpo_number(-1);
+ block->set_loop_header(NULL);
+ block->set_loop_depth(0);
+ block->set_loop_end(-1);
+ }
+ schedule_->rpo_order()->clear();
+ SpecialRPONumberer numberer(zone_, schedule_);
+ numberer.ComputeSpecialRPO();
+ GenerateImmediateDominatorTree();
+ scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
+
+ // Move previously planned nodes.
+ // TODO(mstarzinger): Improve that by supporting bulk moves.
+ MovePlannedNodes(block, schedule_->block(node));
+
+ if (FLAG_trace_turbo_scheduler) {
+ OFStream os(stdout);
+ os << "Schedule after control flow fusion:" << *schedule_;
+ }
}
-void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
- Node* block_start = block->NodeAt(0);
- DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
- // Find the current "control successor" of the node that starts the block
- // by searching the control uses for a control input edge from a connected
- // control node.
- Node* control_succ = NULL;
- for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
- ++i) {
- Node::Edge edge = i.edge();
- if (NodeProperties::IsControlEdge(edge) &&
- GetData(edge.from())->is_connected_control_) {
- DCHECK_EQ(NULL, control_succ);
- control_succ = edge.from();
- control_succ->ReplaceInput(edge.index(), end);
- }
+void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
+ Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(),
+ to->id().ToInt());
+ NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
+ for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) {
+ schedule_->SetBlockForNode(to, *i);
+ scheduled_nodes_[to->id().ToSize()].push_back(*i);
}
- DCHECK_NE(NULL, control_succ);
- Trace(" Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
- end->id(), end->op()->mnemonic(), control_succ->id(),
- control_succ->op()->mnemonic(), block_start->id(),
- block_start->op()->mnemonic());
-
- // Find the "start" node of the control subgraph, which should be the
- // unique node that is itself floating control but has a control input that
- // is not floating.
- Node* start = NULL;
- ZoneQueue<Node*> queue(zone_);
- queue.push(end);
- GetData(end)->is_connected_control_ = true;
- while (!queue.empty()) {
- Node* node = queue.front();
- queue.pop();
- Trace(" Search #%d:%s for control subgraph start\n", node->id(),
- node->op()->mnemonic());
- int max = NodeProperties::PastControlIndex(node);
- for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
- Node* input = node->InputAt(i);
- SchedulerData* data = GetData(input);
- if (data->is_floating_control_) {
- // {input} is floating control.
- if (!data->is_connected_control_) {
- // First time seeing {input} during this traversal, queue it.
- queue.push(input);
- data->is_connected_control_ = true;
- }
- } else {
- // Otherwise, {node} is the start node, because it is floating control
- // but is connected to {input} that is not floating control.
- DCHECK_EQ(NULL, start); // There can be only one.
- start = node;
- }
- }
- }
-
- DCHECK_NE(NULL, start);
- start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
-
- Trace(" Connecting floating control start %d:%s to %d:%s\n", start->id(),
- start->op()->mnemonic(), block_start->id(),
- block_start->op()->mnemonic());
+ nodes->clear();
}
} // namespace compiler
diff --git a/src/compiler/scheduler.h b/src/compiler/scheduler.h
index a0addbd..25e93f9 100644
--- a/src/compiler/scheduler.h
+++ b/src/compiler/scheduler.h
@@ -29,7 +29,18 @@
Schedule* schedule);
private:
- enum Placement { kUnknown, kSchedulable, kFixed, kCoupled };
+ // Placement of a node changes during scheduling. The placement state
+ // transitions over time while the scheduler is choosing a position:
+ //
+ // +---------------------+-----+----> kFixed
+ // / / /
+ // kUnknown ----+------> kCoupled ----+ /
+ // \ /
+ // +----> kSchedulable ----+--------> kScheduled
+ //
+ // 1) GetPlacement(): kUnknown -> kCoupled|kSchedulable|kFixed
+ // 2) UpdatePlacement(): kCoupled|kSchedulable -> kFixed|kScheduled
+ enum Placement { kUnknown, kSchedulable, kFixed, kCoupled, kScheduled };
// Per-node data tracked during scheduling.
struct SchedulerData {
@@ -49,7 +60,6 @@
NodeVector schedule_root_nodes_; // Fixed root nodes seed the worklist.
ZoneQueue<Node*> schedule_queue_; // Worklist of schedulable nodes.
ZoneVector<SchedulerData> node_data_; // Per-node data for all nodes.
- bool has_floating_control_;
Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
@@ -57,6 +67,7 @@
inline SchedulerData* GetData(Node* node);
Placement GetPlacement(Node* node);
+ void UpdatePlacement(Node* node, Placement placement);
void IncrementUnscheduledUseCount(Node* node, Node* from);
void DecrementUnscheduledUseCount(Node* node, Node* from);
@@ -85,8 +96,8 @@
friend class ScheduleLateNodeVisitor;
void ScheduleLate();
- bool ConnectFloatingControl();
- void ConnectFloatingControlSubgraph(BasicBlock* block, Node* node);
+ void FuseFloatingControl(BasicBlock* block, Node* node);
+ void MovePlannedNodes(BasicBlock* from, BasicBlock* to);
};
} // namespace compiler
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index 1a4ca96..35707f2 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -605,19 +605,17 @@
break;
}
case IrOpcode::kNumberDivide: {
- NumberMatcher right(node->InputAt(1));
- if (right.HasValue() && !right.Is(0) && !right.Is(-1)) {
- if (CanLowerToInt32Binop(node, use)) {
- // => signed Int32Div
- VisitInt32Binop(node);
- if (lower()) node->set_op(Int32Op(node));
- break;
- } else if (CanLowerToUint32Binop(node, use)) {
- // => unsigned Uint32Div
- VisitUint32Binop(node);
- if (lower()) node->set_op(Uint32Op(node));
- break;
- }
+ if (CanLowerToInt32Binop(node, use)) {
+ // => signed Int32Div
+ VisitInt32Binop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ break;
+ }
+ if (CanLowerToUint32Binop(node, use)) {
+ // => unsigned Uint32Div
+ VisitUint32Binop(node);
+ if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
+ break;
}
// => Float64Div
VisitFloat64Binop(node);
@@ -625,20 +623,17 @@
break;
}
case IrOpcode::kNumberModulus: {
- NumberMatcher right(node->InputAt(1));
- if (right.HasValue() && !right.Is(0) && !right.Is(-1)) {
- if (BothInputsAre(node, Type::Signed32()) &&
- !CanObserveMinusZero(use)) {
- // => signed Int32Mod
- VisitInt32Binop(node);
- if (lower()) node->set_op(Int32Op(node));
- break;
- } else if (BothInputsAre(node, Type::Unsigned32())) {
- // => unsigned Uint32Mod
- VisitUint32Binop(node);
- if (lower()) node->set_op(Uint32Op(node));
- break;
- }
+ if (CanLowerToInt32Binop(node, use)) {
+ // => signed Int32Mod
+ VisitInt32Binop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ break;
+ }
+ if (CanLowerToUint32Binop(node, use)) {
+ // => unsigned Uint32Mod
+ VisitUint32Binop(node);
+ if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ break;
}
// => Float64Mod
VisitFloat64Binop(node);
@@ -1079,23 +1074,23 @@
const ElementAccess& access = ElementAccessOf(node->op());
const Operator* op = machine()->Load(access.machine_type);
Node* key = node->InputAt(1);
+ Node* effect = node->InputAt(3);
Node* index = ComputeIndex(access, key);
if (access.bounds_check == kNoBoundsCheck) {
DCHECK_EQ(access.machine_type, output_type);
node->set_op(op);
node->ReplaceInput(1, index);
- node->RemoveInput(2);
+ node->ReplaceInput(2, effect);
+ node->ReplaceInput(3, graph()->start());
} else {
DCHECK_EQ(kTypedArrayBoundsCheck, access.bounds_check);
Node* base = node->InputAt(0);
Node* length = node->InputAt(2);
- Node* effect = node->InputAt(3);
- Node* control = node->InputAt(4);
Node* check = graph()->NewNode(machine()->Uint32LessThan(), key, length);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
+ graph()->start());
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* load = graph()->NewNode(op, base, index, effect, if_true);
@@ -1218,6 +1213,148 @@
}
+Node* SimplifiedLowering::Int32Div(Node* const node) {
+ Int32BinopMatcher m(node);
+ Node* const zero = jsgraph()->Int32Constant(0);
+ Node* const lhs = m.left().node();
+ Node* const rhs = m.right().node();
+
+ if (m.right().Is(-1)) {
+ return graph()->NewNode(machine()->Int32Sub(), zero, lhs);
+ } else if (m.right().Is(0)) {
+ return rhs;
+ } else if (machine()->Int32DivIsSafe() || m.right().HasValue()) {
+ return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
+ }
+
+ Node* check0 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse), check0,
+ graph()->start());
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* true0 = zero;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* false0 = nullptr;
+ {
+ Node* check1 = graph()->NewNode(machine()->Word32Equal(), rhs,
+ jsgraph()->Int32Constant(-1));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* true1 = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* false1 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false1);
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ false0 = graph()->NewNode(common()->Phi(kMachInt32, 2), true1, false1,
+ if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ return graph()->NewNode(common()->Phi(kMachInt32, 2), true0, false0, merge0);
+}
+
+
+Node* SimplifiedLowering::Int32Mod(Node* const node) {
+ Int32BinopMatcher m(node);
+ Node* const zero = jsgraph()->Int32Constant(0);
+ Node* const lhs = m.left().node();
+ Node* const rhs = m.right().node();
+
+ if (m.right().Is(-1) || m.right().Is(0)) {
+ return zero;
+ } else if (machine()->Int32ModIsSafe() || m.right().HasValue()) {
+ return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
+ }
+
+ Node* check0 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse), check0,
+ graph()->start());
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* true0 = zero;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* false0 = nullptr;
+ {
+ Node* check1 = graph()->NewNode(machine()->Word32Equal(), rhs,
+ jsgraph()->Int32Constant(-1));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* true1 = zero;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* false1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false1);
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ false0 = graph()->NewNode(common()->Phi(kMachInt32, 2), true1, false1,
+ if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ return graph()->NewNode(common()->Phi(kMachInt32, 2), true0, false0, merge0);
+}
+
+
+Node* SimplifiedLowering::Uint32Div(Node* const node) {
+ Uint32BinopMatcher m(node);
+ Node* const zero = jsgraph()->Uint32Constant(0);
+ Node* const lhs = m.left().node();
+ Node* const rhs = m.right().node();
+
+ if (m.right().Is(0)) {
+ return zero;
+ } else if (machine()->Uint32DivIsSafe() || m.right().HasValue()) {
+ return graph()->NewNode(machine()->Uint32Div(), lhs, rhs, graph()->start());
+ }
+
+ Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse), check,
+ graph()->start());
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = zero;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, if_false);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ return graph()->NewNode(common()->Phi(kMachUint32, 2), vtrue, vfalse, merge);
+}
+
+
+Node* SimplifiedLowering::Uint32Mod(Node* const node) {
+ Uint32BinopMatcher m(node);
+ Node* const zero = jsgraph()->Uint32Constant(0);
+ Node* const lhs = m.left().node();
+ Node* const rhs = m.right().node();
+
+ if (m.right().Is(0)) {
+ return zero;
+ } else if (machine()->Uint32ModIsSafe() || m.right().HasValue()) {
+ return graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, graph()->start());
+ }
+
+ Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse), check,
+ graph()->start());
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = zero;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_false);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ return graph()->NewNode(common()->Phi(kMachUint32, 2), vtrue, vfalse, merge);
+}
+
+
void SimplifiedLowering::DoStringEqual(Node* node) {
node->set_op(machine()->WordEqual());
node->ReplaceInput(0, StringComparison(node, false));
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 2264d91..d4d99d3 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -42,6 +42,10 @@
Node* OffsetMinusTagConstant(int32_t offset);
Node* ComputeIndex(const ElementAccess& access, Node* index);
Node* StringComparison(Node* node, bool requires_ordering);
+ Node* Int32Div(Node* const node);
+ Node* Int32Mod(Node* const node);
+ Node* Uint32Div(Node* const node);
+ Node* Uint32Mod(Node* const node);
friend class RepresentationSelector;
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 21a18ea..a1a6a02 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -102,8 +102,8 @@
case IrOpcode::kLoadElement: {
ElementAccess access = ElementAccessOf(node->op());
if (access.bounds_check == kTypedArrayBoundsCheck) {
- NumericValueMatcher mkey(node->InputAt(1));
- NumericValueMatcher mlength(node->InputAt(2));
+ NumberMatcher mkey(node->InputAt(1));
+ NumberMatcher mlength(node->InputAt(2));
if (mkey.HasValue() && mlength.HasValue()) {
// Skip the typed array bounds check if key and length are constant.
if (mkey.Value() >= 0 && mkey.Value() < mlength.Value()) {
@@ -118,8 +118,8 @@
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
if (access.bounds_check == kTypedArrayBoundsCheck) {
- NumericValueMatcher mkey(node->InputAt(1));
- NumericValueMatcher mlength(node->InputAt(2));
+ NumberMatcher mkey(node->InputAt(1));
+ NumberMatcher mlength(node->InputAt(2));
if (mkey.HasValue() && mlength.HasValue()) {
// Skip the typed array bounds check if key and length are constant.
if (mkey.Value() >= 0 && mkey.Value() < mlength.Value()) {
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 78713a0..41674a3 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -1112,8 +1112,9 @@
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::Flag::kNoFlags;
+ return MachineOperatorBuilder::kNoFlags;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/debug.cc b/src/debug.cc
index c1c2aad..6521110 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -2762,6 +2762,7 @@
Handle<Object> exec_state,
Handle<Object> event_data,
v8::Debug::ClientData* client_data) {
+ DisableBreak no_break(this, true);
if (event_listener_->IsForeign()) {
// Invoke the C debug event listener.
v8::Debug::EventCallback callback =
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 6951d8c..d161d55 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -114,6 +114,11 @@
#else
#define ENABLE_ARMV7_DEFAULT false
#endif
+#if (defined CAN_USE_ARMV8_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
+#define ENABLE_ARMV8_DEFAULT true
+#else
+#define ENABLE_ARMV8_DEFAULT false
+#endif
#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
#define ENABLE_32DREGS_DEFAULT true
#else
@@ -353,8 +358,9 @@
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_STRING(trace_turbo_cfg_file, NULL,
"trace turbo cfg graph (for C1 visualizer) to a given file name")
-DEFINE_BOOL(trace_turbo_types, true, "trace generated TurboFan types")
-DEFINE_BOOL(trace_turbo_scheduler, false, "trace generated TurboFan scheduler")
+DEFINE_BOOL(trace_turbo_types, true, "trace TurboFan's types")
+DEFINE_BOOL(trace_turbo_scheduler, false, "trace TurboFan's scheduler")
+DEFINE_BOOL(trace_turbo_reduction, false, "trace TurboFan's various reducers")
DEFINE_BOOL(turbo_asm, false, "enable TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, false, "verify TurboFan graphs at each phase")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
@@ -368,6 +374,7 @@
DEFINE_BOOL(turbo_inlining_intrinsics, false,
"enable inlining of intrinsics in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
+DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
DEFINE_IMPLICATION(turbo_inlining_intrinsics, turbo_inlining)
DEFINE_IMPLICATION(turbo_inlining, turbo_types)
DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
@@ -401,6 +408,8 @@
"enable use of VFP3 instructions if available")
DEFINE_BOOL(enable_armv7, ENABLE_ARMV7_DEFAULT,
"enable use of ARMv7 instructions if available (ARM only)")
+DEFINE_BOOL(enable_armv8, ENABLE_ARMV8_DEFAULT,
+ "enable use of ARMv8 instructions if available (ARM 32-bit only)")
DEFINE_BOOL(enable_neon, ENABLE_NEON_DEFAULT,
"enable use of NEON instructions if available (ARM only)")
DEFINE_BOOL(enable_sudiv, true,
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 9d31cd5..bd38467 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -308,11 +308,6 @@
TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
- if (!AstNumbering::Renumber(info->function(), info->zone())) {
- DCHECK(!isolate->has_pending_exception());
- return false;
- }
-
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
@@ -1585,9 +1580,9 @@
__ Push(Smi::FromInt(lit->start_position()));
__ Push(Smi::FromInt(lit->end_position()));
- // TODO(arv): Process methods
-
__ CallRuntime(Runtime::kDefineClass, 6);
+ EmitClassDefineProperties(lit);
+
context()->Plug(result_register());
}
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 1aeb861..8c210ba 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -563,6 +563,11 @@
// The receiver and the key is left on the stack by the IC.
void EmitKeyedPropertyLoad(Property* expr);
+ // Adds the properties to the class (function) object and to its prototype.
+ // Expects the class (function) in the accumulator. The class (function) is
+ // in the accumulator after installing all the properties.
+ void EmitClassDefineProperties(ClassLiteral* lit);
+
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
void EmitBinaryOp(BinaryOperation* expr,
diff --git a/src/globals.h b/src/globals.h
index 919d9de..03eb99e 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -67,11 +67,7 @@
#endif
// Determine whether the architecture uses an out-of-line constant pool.
-#if V8_TARGET_ARCH_ARM
-#define V8_OOL_CONSTANT_POOL 1
-#else
#define V8_OOL_CONSTANT_POOL 0
-#endif
#ifdef V8_TARGET_ARCH_ARM
// Set stack limit lower for ARM than for other architectures because
@@ -609,28 +605,29 @@
// CPU feature flags.
enum CpuFeature {
- // x86
- SSE4_1,
- SSE3,
- SAHF,
- // ARM
- VFP3,
- ARMv7,
- SUDIV,
- MLS,
- UNALIGNED_ACCESSES,
- MOVW_MOVT_IMMEDIATE_LOADS,
- VFP32DREGS,
- NEON,
- // MIPS, MIPS64
- FPU,
- FP64FPU,
- MIPSr1,
- MIPSr2,
- MIPSr6,
- // ARM64
- ALWAYS_ALIGN_CSP,
- NUMBER_OF_CPU_FEATURES
+ // x86
+ SSE4_1,
+ SSE3,
+ SAHF,
+ // ARM
+ VFP3,
+ ARMv7,
+ ARMv8,
+ SUDIV,
+ MLS,
+ UNALIGNED_ACCESSES,
+ MOVW_MOVT_IMMEDIATE_LOADS,
+ VFP32DREGS,
+ NEON,
+ // MIPS, MIPS64
+ FPU,
+ FP64FPU,
+ MIPSr1,
+ MIPSr2,
+ MIPSr6,
+ // ARM64
+ ALWAYS_ALIGN_CSP,
+ NUMBER_OF_CPU_FEATURES
};
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index 680e1cc..68522fc 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -1698,7 +1698,7 @@
continue;
}
if (ExtractAccessorPairProperty(js_obj, entry, k, value)) continue;
- SetPropertyReference(js_obj, entry, String::cast(k), value);
+ SetPropertyReference(js_obj, entry, Name::cast(k), value);
}
}
}
@@ -2300,7 +2300,6 @@
: isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()),
snapshot_(snapshot),
names_(snapshot_->profiler()->names()),
- progress_(progress),
embedder_queried_(false),
objects_by_info_(RetainedInfosMatch),
native_groups_(StringsMatch),
diff --git a/src/heap-snapshot-generator.h b/src/heap-snapshot-generator.h
index 3e4ce71..646d497 100644
--- a/src/heap-snapshot-generator.h
+++ b/src/heap-snapshot-generator.h
@@ -176,7 +176,6 @@
void FillChildren();
void Print(int max_depth);
- void PrintEntriesSize();
private:
HeapEntry* AddRootEntry();
@@ -332,7 +331,6 @@
v8::HeapProfiler::ObjectNameResolver* resolver);
virtual ~V8HeapExplorer();
virtual HeapEntry* AllocateEntry(HeapThing ptr);
- void AddRootEntries(SnapshotFiller* filler);
int EstimateObjectsCount(HeapIterator* iterator);
bool IterateAndExtractReferences(SnapshotFiller* filler);
void TagGlobalObjects();
@@ -473,7 +471,6 @@
NativeObjectsExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress);
virtual ~NativeObjectsExplorer();
- void AddRootEntries(SnapshotFiller* filler);
int EstimateObjectsCount();
bool IterateAndExtractReferences(SnapshotFiller* filler);
@@ -506,7 +503,6 @@
Isolate* isolate_;
HeapSnapshot* snapshot_;
StringsStorage* names_;
- SnapshottingProgressReportingInterface* progress_;
bool embedder_queried_;
HeapObjectsSet in_groups_;
// RetainedObjectInfo* -> List<HeapObject*>*
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index f309eae..bcfa2cb 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2926,8 +2926,7 @@
}
static inline void UpdateSlot(Heap* heap, Object** slot) {
- Object* obj = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+ Object* obj = *slot;
if (!obj->IsHeapObject()) return;
@@ -2938,10 +2937,7 @@
DCHECK(heap->InFromSpace(heap_obj) ||
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
HeapObject* target = map_word.ToForwardingAddress();
- base::NoBarrier_CompareAndSwap(
- reinterpret_cast<base::AtomicWord*>(slot),
- reinterpret_cast<base::AtomicWord>(obj),
- reinterpret_cast<base::AtomicWord>(target));
+ *slot = target;
DCHECK(!heap->InFromSpace(target) &&
!MarkCompactCollector::IsOnEvacuationCandidate(target));
}
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index c2bdd66..d058ae0 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -7845,8 +7845,7 @@
// step, but don't transfer ownership to target_info.
target_info.SetAstValueFactory(top_info()->ast_value_factory(), false);
Handle<SharedFunctionInfo> target_shared(target->shared());
- if (!Parser::Parse(&target_info) || !Scope::Analyze(&target_info) ||
- !AstNumbering::Renumber(target_info.function(), target_info.zone())) {
+ if (!Compiler::ParseAndAnalyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index c033fd5..de08bd1 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -2421,6 +2421,67 @@
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in eax.
+ DCHECK(lit != NULL);
+ __ push(eax);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = ebx;
+ __ mov(scratch, FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ push(Operand(esp, kPointerSize)); // constructor
+ } else {
+ __ push(Operand(esp, 0)); // prototype
+ }
+ VisitForStackValue(key);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ VisitForStackValue(value);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ VisitForStackValue(value);
+ __ push(Immediate(isolate()->factory()->null_value()));
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ push(Immediate(isolate()->factory()->null_value()));
+ VisitForStackValue(value);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
@@ -2857,6 +2918,8 @@
__ push(Immediate(isolate()->factory()->undefined_value()));
}
+ // Push the enclosing function.
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the language mode.
@@ -2866,7 +2929,7 @@
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index 9465257..52aafca 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -598,7 +598,7 @@
}
-static void KeyedStoreGenerateGenericHelper(
+static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
Register value, Register key, Register receiver, Register receiver_map,
@@ -739,9 +739,8 @@
}
-void KeyedStoreIC::GenerateGeneric(
- MacroAssembler* masm, StrictMode strict_mode,
- KeyedStoreStubCacheRequirement handler_requirement) {
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -808,12 +807,7 @@
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, r3, r4, r5, r6);
// Cache miss.
- if (handler_requirement == kCallRuntimeOnMissingHandler) {
- __ b(&slow);
- } else {
- DCHECK(handler_requirement == kMissOnMissingHandler);
- __ b(&miss);
- }
+ __ b(&miss);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -848,13 +842,13 @@
__ cmp(key, Operand(ip));
__ b(hs, &extra);
- KeyedStoreGenerateGenericHelper(
+ KeyedStoreGenerateMegamorphicHelper(
masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength, value,
- key, receiver, receiver_map, elements_map,
- elements);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength, value, key, receiver,
+ receiver_map, elements_map, elements);
__ bind(&miss);
GenerateMiss(masm);
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index d25dd07..4804a23 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -640,7 +640,7 @@
}
-static void KeyedStoreGenerateGenericHelper(
+static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
Register value, Register key, Register receiver, Register receiver_map,
@@ -772,10 +772,9 @@
}
-void KeyedStoreIC::GenerateGeneric(
- MacroAssembler* masm, StrictMode strict_mode,
- KeyedStoreStubCacheRequirement handler_requirement) {
- ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
Label slow;
Label array;
Label fast_object;
@@ -841,12 +840,7 @@
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, x3, x4, x5, x6);
// Cache miss.
- if (handler_requirement == kCallRuntimeOnMissingHandler) {
- __ B(&slow);
- } else {
- DCHECK(handler_requirement == kMissOnMissingHandler);
- __ B(&miss);
- }
+ __ B(&miss);
__ Bind(&extra);
// Extra capacity case: Check if there is extra capacity to
@@ -881,18 +875,16 @@
__ B(eq, &extra); // We can handle the case where we are appending 1 element.
__ B(lo, &slow);
- KeyedStoreGenerateGenericHelper(
+ KeyedStoreGenerateMegamorphicHelper(
masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength, value,
- key, receiver, receiver_map, elements_map,
- elements);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength, value, key, receiver,
+ receiver_map, elements_map, elements);
- if (handler_requirement == kMissOnMissingHandler) {
- __ bind(&miss);
- GenerateMiss(masm);
- }
+ __ bind(&miss);
+ GenerateMiss(masm);
}
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index f6dac32..a622ba4 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -507,7 +507,7 @@
}
-static void KeyedStoreGenerateGenericHelper(
+static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
Label transition_smi_elements;
@@ -645,9 +645,8 @@
}
-void KeyedStoreIC::GenerateGeneric(
- MacroAssembler* masm, StrictMode strict_mode,
- KeyedStoreStubCacheRequirement handler_requirement) {
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictMode strict_mode) {
// Return address is on the stack.
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
@@ -696,12 +695,7 @@
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, ebx, no_reg);
// Cache miss.
- if (handler_requirement == kCallRuntimeOnMissingHandler) {
- __ jmp(&slow);
- } else {
- DCHECK(handler_requirement == kMissOnMissingHandler);
- __ jmp(&miss);
- }
+ __ jmp(&miss);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -740,15 +734,14 @@
__ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
- kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
+ kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength);
- if (handler_requirement == kMissOnMissingHandler) {
- __ bind(&miss);
- GenerateMiss(masm);
- }
+ __ bind(&miss);
+ GenerateMiss(masm);
}
diff --git a/src/ic/ic-inl.h b/src/ic/ic-inl.h
index 91b8955..22f66d0 100644
--- a/src/ic/ic-inl.h
+++ b/src/ic/ic-inl.h
@@ -208,30 +208,12 @@
}
-inline Code* CallIC::get_host() {
+inline Code* IC::get_host() {
return isolate()
->inner_pointer_to_code_cache()
->GetCacheEntry(address())
->code;
}
-
-
-// static
-IC::State CallIC::FeedbackToState(Isolate* isolate, TypeFeedbackVector* vector,
- FeedbackVectorICSlot slot) {
- IC::State state = UNINITIALIZED;
- Object* feedback = vector->Get(slot);
-
- if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
- state = GENERIC;
- } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
- state = MONOMORPHIC;
- } else {
- CHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate));
- }
-
- return state;
-}
}
} // namespace v8::internal
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
index 5a17bc8..18ea7f3 100644
--- a/src/ic/ic-state.cc
+++ b/src/ic/ic-state.cc
@@ -18,12 +18,18 @@
// static
+template <class Nexus>
void ICUtility::Clear(Isolate* isolate, Code::Kind kind, Code* host,
- TypeFeedbackVector* vector, FeedbackVectorICSlot slot) {
- IC::Clear(isolate, kind, host, vector, slot);
+ Nexus* nexus) {
+ IC::Clear<Nexus>(isolate, kind, host, nexus);
}
+// Force instantiation of template instances for vector-based IC clearing.
+template void ICUtility::Clear<CallICNexus>(Isolate*, Code::Kind, Code*,
+ CallICNexus*);
+
+
CallICState::CallICState(ExtraICState extra_ic_state)
: argc_(ArgcBits::decode(extra_ic_state)),
call_type_(CallTypeBits::decode(extra_ic_state)) {}
diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h
index a8cfefd..9bb877a 100644
--- a/src/ic/ic-state.h
+++ b/src/ic/ic-state.h
@@ -20,8 +20,9 @@
static void Clear(Isolate* isolate, Address address,
ConstantPoolArray* constant_pool);
// Clear a vector-based inline cache to initial state.
+ template <class Nexus>
static void Clear(Isolate* isolate, Code::Kind kind, Code* host,
- TypeFeedbackVector* vector, FeedbackVectorICSlot slot);
+ Nexus* nexus);
};
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index 98d86b5..d0c0edb 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -89,8 +89,8 @@
void IC::TraceIC(const char* type, Handle<Object> name) {
if (FLAG_trace_ic) {
- Code* new_target = raw_target();
- State new_state = new_target->ic_state();
+ State new_state =
+ UseVector() ? nexus()->StateFromFeedback() : raw_target()->ic_state();
TraceIC(type, name, state(), new_state);
}
}
@@ -134,12 +134,16 @@
}
}
-#define TRACE_IC(type, name) TraceIC(type, name)
-#define TRACE_VECTOR_IC(type, name, old_state, new_state) \
- TraceIC(type, name, old_state, new_state)
-IC::IC(FrameDepth depth, Isolate* isolate)
- : isolate_(isolate), target_set_(false), target_maps_set_(false) {
+#define TRACE_IC(type, name) TraceIC(type, name)
+
+
+IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus,
+ bool for_queries_only)
+ : isolate_(isolate),
+ target_set_(false),
+ target_maps_set_(false),
+ nexus_(nexus) {
// To improve the performance of the (much used) IC code, we unfold a few
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
@@ -178,8 +182,10 @@
}
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
target_ = handle(raw_target(), isolate);
- state_ = target_->ic_state();
kind_ = target_->kind();
+ state_ = (!for_queries_only && UseVector()) ? nexus->StateFromFeedback()
+ : target_->ic_state();
+ old_state_ = state_;
extra_ic_state_ = target_->extra_ic_state();
}
@@ -419,6 +425,30 @@
}
+// static
+void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host,
+ TypeFeedbackVector* vector, State old_state,
+ State new_state) {
+ if (host->kind() != Code::FUNCTION) return;
+
+ if (FLAG_type_info_threshold > 0) {
+ int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic.
+ int generic_delta = 0; // "Generic" here includes megamorphic.
+ ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
+ &generic_delta);
+ vector->change_ic_with_type_info_count(polymorphic_delta);
+ vector->change_ic_generic_count(generic_delta);
+ }
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->change_own_type_change_checksum();
+ host->set_profiler_ticks(0);
+ isolate->runtime_profiler()->NotifyICChanged();
+ // TODO(2029): When an optimized function is patched, it would
+ // be nice to propagate the corresponding type information to its
+ // unoptimized version for the benefit of later inlining.
+}
+
+
void IC::PostPatching(Address address, Code* target, Code* old_target) {
// Type vector based ICs update these statistics at a different time because
// they don't always patch on state change.
@@ -507,17 +537,21 @@
}
-void IC::Clear(Isolate* isolate, Code::Kind kind, Code* host,
- TypeFeedbackVector* vector, FeedbackVectorICSlot slot) {
+template <class Nexus>
+void IC::Clear(Isolate* isolate, Code::Kind kind, Code* host, Nexus* nexus) {
switch (kind) {
case Code::CALL_IC:
- return CallIC::Clear(isolate, host, vector, slot);
+ return CallIC::Clear(isolate, host, nexus);
default:
UNREACHABLE();
}
}
+// Force instantiation of template instances for vector-based IC clearing.
+template void IC::Clear(Isolate*, Code::Kind, Code*, CallICNexus*);
+
+
void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
@@ -529,18 +563,15 @@
}
-void CallIC::Clear(Isolate* isolate, Code* host, TypeFeedbackVector* vector,
- FeedbackVectorICSlot slot) {
- DCHECK(vector != NULL && !slot.IsInvalid());
- Object* feedback = vector->Get(slot);
+void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
// Determine our state.
- State state = FeedbackToState(isolate, vector, slot);
+ Object* feedback = nexus->vector()->Get(nexus->slot());
+ State state = nexus->StateFromFeedback();
if (state != UNINITIALIZED && !feedback->IsAllocationSite()) {
- vector->Set(slot, isolate->heap()->uninitialized_symbol(),
- SKIP_WRITE_BARRIER);
+ nexus->ConfigureUninitialized();
// The change in state must be processed.
- OnTypeFeedbackChanged(isolate, host, vector, state, UNINITIALIZED);
+ OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, UNINITIALIZED);
}
}
@@ -1672,11 +1703,10 @@
return generic_stub();
}
- // If the maximum number of receiver maps has been exceeded, use the generic
- // version of the IC.
+ // If the maximum number of receiver maps has been exceeded, use the
+ // megamorphic version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "max polymorph exceeded");
- return generic_stub();
+ return megamorphic_stub();
}
// Make sure all polymorphic handlers have the same store mode, otherwise the
@@ -1941,33 +1971,14 @@
// static
-void CallIC::OnTypeFeedbackChanged(Isolate* isolate, Code* host,
- TypeFeedbackVector* vector, State old_state,
- State new_state) {
- if (host->kind() != Code::FUNCTION) return;
-
- if (FLAG_type_info_threshold > 0) {
- int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic.
- int generic_delta = 0; // "Generic" here includes megamorphic.
- ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
- &generic_delta);
- vector->change_ic_with_type_info_count(polymorphic_delta);
- vector->change_ic_generic_count(generic_delta);
- }
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
- host->set_profiler_ticks(0);
- isolate->runtime_profiler()->NotifyICChanged();
- // TODO(2029): When an optimized function is patched, it would
- // be nice to propagate the corresponding type information to its
- // unoptimized version for the benefit of later inlining.
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
}
bool CallIC::DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
- Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot,
- const CallICState& state) {
+ const CallICState& callic_state) {
DCHECK(FLAG_use_ic && function->IsJSFunction());
// Are we the array function?
@@ -1975,42 +1986,33 @@
Handle<JSFunction>(isolate()->native_context()->array_function());
if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
// Alter the slot.
- IC::State old_state = FeedbackToState(isolate(), *vector, slot);
- Object* feedback = vector->Get(slot);
- if (!feedback->IsAllocationSite()) {
- Handle<AllocationSite> new_site =
- isolate()->factory()->NewAllocationSite();
- vector->Set(slot, *new_site);
- }
+ CallICNexus* nexus = casted_nexus<CallICNexus>();
+ nexus->ConfigureMonomorphicArray();
- CallIC_ArrayStub stub(isolate(), state);
+ CallIC_ArrayStub stub(isolate(), callic_state);
set_target(*stub.GetCode());
Handle<String> name;
if (array_function->shared()->name()->IsString()) {
name = Handle<String>(String::cast(array_function->shared()->name()),
isolate());
}
-
- IC::State new_state = FeedbackToState(isolate(), *vector, slot);
- OnTypeFeedbackChanged(isolate(), get_host(), *vector, old_state, new_state);
- TRACE_VECTOR_IC("CallIC (custom handler)", name, old_state, new_state);
+ TRACE_IC("CallIC", name);
+ OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(),
+ MONOMORPHIC);
return true;
}
return false;
}
-void CallIC::PatchMegamorphic(Handle<Object> function,
- Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
- CallICState state(target()->extra_ic_state());
- IC::State old_state = FeedbackToState(isolate(), *vector, slot);
+void CallIC::PatchMegamorphic(Handle<Object> function) {
+ CallICState callic_state(target()->extra_ic_state());
// We are going generic.
- vector->Set(slot, *TypeFeedbackVector::MegamorphicSentinel(isolate()),
- SKIP_WRITE_BARRIER);
+ CallICNexus* nexus = casted_nexus<CallICNexus>();
+ nexus->ConfigureGeneric();
- CallICStub stub(isolate(), state);
+ CallICStub stub(isolate(), callic_state);
Handle<Code> code = stub.GetCode();
set_target(*code);
@@ -2020,27 +2022,24 @@
name = handle(js_function->shared()->name(), isolate());
}
- IC::State new_state = FeedbackToState(isolate(), *vector, slot);
- OnTypeFeedbackChanged(isolate(), get_host(), *vector, old_state, new_state);
- TRACE_VECTOR_IC("CallIC", name, old_state, new_state);
+ TRACE_IC("CallIC", name);
+ OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(),
+ GENERIC);
}
-void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function,
- Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
- CallICState state(target()->extra_ic_state());
- IC::State old_state = FeedbackToState(isolate(), *vector, slot);
+void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function) {
+ CallICState callic_state(target()->extra_ic_state());
Handle<Object> name = isolate()->factory()->empty_string();
- Object* feedback = vector->Get(slot);
+ CallICNexus* nexus = casted_nexus<CallICNexus>();
+ Object* feedback = nexus->GetFeedback();
// Hand-coded MISS handling is easier if CallIC slots don't contain smis.
DCHECK(!feedback->IsSmi());
if (feedback->IsJSFunction() || !function->IsJSFunction()) {
// We are going generic.
- vector->Set(slot, *TypeFeedbackVector::MegamorphicSentinel(isolate()),
- SKIP_WRITE_BARRIER);
+ nexus->ConfigureGeneric();
} else {
// The feedback is either uninitialized or an allocation site.
// It might be an allocation site because if we re-compile the full code
@@ -2052,12 +2051,11 @@
feedback->IsAllocationSite());
// Do we want to install a custom handler?
- if (FLAG_use_ic &&
- DoCustomHandler(receiver, function, vector, slot, state)) {
+ if (FLAG_use_ic && DoCustomHandler(receiver, function, callic_state)) {
return;
}
- vector->Set(slot, *function);
+ nexus->ConfigureMonomorphic(Handle<JSFunction>::cast(function));
}
if (function->IsJSFunction()) {
@@ -2065,9 +2063,9 @@
name = handle(js_function->shared()->name(), isolate());
}
- IC::State new_state = FeedbackToState(isolate(), *vector, slot);
- OnTypeFeedbackChanged(isolate(), get_host(), *vector, old_state, new_state);
- TRACE_VECTOR_IC("CallIC", name, old_state, new_state);
+ IC::State new_state = nexus->StateFromFeedback();
+ OnTypeFeedbackChanged(isolate(), get_host(), *vector(), state(), new_state);
+ TRACE_IC("CallIC", name);
}
@@ -2083,13 +2081,14 @@
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK(args.length() == 4);
- CallIC ic(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> function = args.at<Object>(1);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
Handle<Smi> slot = args.at<Smi>(3);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- ic.HandleMiss(receiver, function, vector, vector_slot);
+ CallICNexus nexus(vector, vector_slot);
+ CallIC ic(isolate, &nexus);
+ ic.HandleMiss(receiver, function);
return *function;
}
@@ -2098,13 +2097,14 @@
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK(args.length() == 4);
- // A miss on a custom call ic always results in going megamorphic.
- CallIC ic(isolate);
Handle<Object> function = args.at<Object>(1);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
Handle<Smi> slot = args.at<Smi>(3);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- ic.PatchMegamorphic(function, vector, vector_slot);
+ CallICNexus nexus(vector, vector_slot);
+ // A miss on a custom call ic always results in going megamorphic.
+ CallIC ic(isolate, &nexus);
+ ic.PatchMegamorphic(function);
return *function;
}
diff --git a/src/ic/ic.h b/src/ic/ic.h
index 5177ff8..c50b43e 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -59,7 +59,8 @@
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
- IC(FrameDepth depth, Isolate* isolate);
+ IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL,
+ bool for_queries_only = false);
virtual ~IC() {}
State state() const { return state_; }
@@ -71,6 +72,7 @@
bool IsNameCompatibleWithPrototypeFailure(Handle<Object> name);
void MarkPrototypeFailure(Handle<Object> name) {
DCHECK(IsNameCompatibleWithPrototypeFailure(name));
+ old_state_ = state_;
state_ = PROTOTYPE_FAILURE;
}
@@ -87,8 +89,9 @@
ConstantPoolArray* constant_pool);
// Clear the vector-based inline cache to initial state.
+ template <class Nexus>
static void Clear(Isolate* isolate, Code::Kind kind, Code* host,
- TypeFeedbackVector* vector, FeedbackVectorICSlot slot);
+ Nexus* nexus);
#ifdef DEBUG
bool IsLoadStub() const {
@@ -117,6 +120,11 @@
return state == UNINITIALIZED || state == PREMONOMORPHIC;
}
+ static bool IsCleared(FeedbackNexus* nexus) {
+ InlineCacheState state = nexus->StateFromFeedback();
+ return state == UNINITIALIZED || state == PREMONOMORPHIC;
+ }
+
// Utility functions to convert maps to types and back. There are two special
// cases:
// - The heap_number_map is used as a marker which includes heap numbers as
@@ -149,6 +157,15 @@
inline void set_target(Code* code);
bool is_target_set() { return target_set_; }
+ bool UseVector() const {
+ bool use = (FLAG_vector_ics &&
+ (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC)) ||
+ kind() == Code::CALL_IC;
+ // If we are supposed to use the nexus, verify the nexus is non-null.
+ DCHECK(!use || nexus_ != NULL);
+ return use;
+ }
+
char TransitionMarkFromState(IC::State state);
void TraceIC(const char* type, Handle<Object> name);
void TraceIC(const char* type, Handle<Object> name, State old_state,
@@ -166,6 +183,10 @@
static void OnTypeFeedbackChanged(Isolate* isolate, Address address,
State old_state, State new_state,
bool target_remains_ic_stub);
+ // As a vector-based IC, type feedback must be updated differently.
+ static void OnTypeFeedbackChanged(Isolate* isolate, Code* host,
+ TypeFeedbackVector* vector, State old_state,
+ State new_state);
static void PostPatching(Address address, Code* target, Code* old_target);
// Compute the handler either by compiling or by retrieving a cached version.
@@ -229,6 +250,20 @@
inline void UpdateTarget();
+ Handle<TypeFeedbackVector> vector() const { return nexus()->vector_handle(); }
+ FeedbackVectorICSlot slot() const { return nexus()->slot(); }
+ State saved_state() const {
+ return state() == PROTOTYPE_FAILURE ? old_state_ : state();
+ }
+
+ template <class NexusClass>
+ NexusClass* casted_nexus() {
+ return static_cast<NexusClass*>(nexus_);
+ }
+ FeedbackNexus* nexus() const { return nexus_; }
+
+ inline Code* get_host();
+
private:
inline Code* raw_target() const;
inline ConstantPoolArray* constant_pool() const;
@@ -263,6 +298,7 @@
// The original code target that missed.
Handle<Code> target_;
bool target_set_;
+ State old_state_; // For saving if we marked as prototype failure.
State state_;
Code::Kind kind_;
Handle<HeapType> receiver_type_;
@@ -272,6 +308,8 @@
MapHandleList target_maps_;
bool target_maps_set_;
+ FeedbackNexus* nexus_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
@@ -295,38 +333,24 @@
class CallIC : public IC {
public:
- explicit CallIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+ CallIC(Isolate* isolate, CallICNexus* nexus)
+ : IC(EXTRA_CALL_FRAME, isolate, nexus) {
+ DCHECK(nexus != NULL);
+ }
- void PatchMegamorphic(Handle<Object> function,
- Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot);
+ void PatchMegamorphic(Handle<Object> function);
- void HandleMiss(Handle<Object> receiver, Handle<Object> function,
- Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot);
+ void HandleMiss(Handle<Object> receiver, Handle<Object> function);
// Returns true if a custom handler was installed.
bool DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
- Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot, const CallICState& state);
+ const CallICState& callic_state);
// Code generator routines.
static Handle<Code> initialize_stub(Isolate* isolate, int argc,
CallICState::CallType call_type);
- static void Clear(Isolate* isolate, Code* host, TypeFeedbackVector* vector,
- FeedbackVectorICSlot slot);
-
- private:
- static inline IC::State FeedbackToState(Isolate* isolate,
- TypeFeedbackVector* vector,
- FeedbackVectorICSlot slot);
-
- inline Code* get_host();
-
- // As a vector-based IC, type feedback must be updated differently.
- static void OnTypeFeedbackChanged(Isolate* isolate, Code* host,
- TypeFeedbackVector* vector, State old_state,
- State new_state);
+ static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
};
@@ -539,12 +563,6 @@
enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
-enum KeyedStoreStubCacheRequirement {
- kCallRuntimeOnMissingHandler,
- kMissOnMissingHandler
-};
-
-
class KeyedStoreIC : public StoreIC {
public:
// ExtraICState bits (building on IC)
@@ -585,9 +603,8 @@
}
static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
- static void GenerateGeneric(
- MacroAssembler* masm, StrictMode strict_mode,
- KeyedStoreStubCacheRequirement handler_requirement);
+ static void GenerateMegamorphic(MacroAssembler* masm, StrictMode strict_mode);
+ static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
static void GenerateSloppyArguments(MacroAssembler* masm);
protected:
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index 01b9bb6..0984490 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -594,7 +594,7 @@
}
-static void KeyedStoreGenerateGenericHelper(
+static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
Register value, Register key, Register receiver, Register receiver_map,
@@ -740,9 +740,8 @@
}
-void KeyedStoreIC::GenerateGeneric(
- MacroAssembler* masm, StrictMode strict_mode,
- KeyedStoreStubCacheRequirement handler_requirement) {
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -805,12 +804,7 @@
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, a3, t0, t1, t2);
// Cache miss.
- if (handler_requirement == kCallRuntimeOnMissingHandler) {
- __ Branch(&slow);
- } else {
- DCHECK(handler_requirement == kMissOnMissingHandler);
- __ Branch(&miss);
- }
+ __ Branch(&miss);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -843,13 +837,13 @@
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(t0));
- KeyedStoreGenerateGenericHelper(
+ KeyedStoreGenerateMegamorphicHelper(
masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength, value,
- key, receiver, receiver_map, elements_map,
- elements);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength, value, key, receiver,
+ receiver_map, elements_map, elements);
__ bind(&miss);
GenerateMiss(masm);
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index fd0501a..b4055b2 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -599,7 +599,7 @@
}
-static void KeyedStoreGenerateGenericHelper(
+static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
Register value, Register key, Register receiver, Register receiver_map,
@@ -749,9 +749,8 @@
}
-void KeyedStoreIC::GenerateGeneric(
- MacroAssembler* masm, StrictMode strict_mode,
- KeyedStoreStubCacheRequirement handler_requirement) {
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -814,12 +813,7 @@
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, a3, a4, a5, a6);
// Cache miss.
- if (handler_requirement == kCallRuntimeOnMissingHandler) {
- __ Branch(&slow);
- } else {
- DCHECK(handler_requirement == kMissOnMissingHandler);
- __ Branch(&miss);
- }
+ __ Branch(&miss);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -852,13 +846,13 @@
__ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(a4));
- KeyedStoreGenerateGenericHelper(
+ KeyedStoreGenerateMegamorphicHelper(
masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength, value,
- key, receiver, receiver_map, elements_map,
- elements);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength, value, key, receiver,
+ receiver_map, elements_map, elements);
__ bind(&miss);
GenerateMiss(masm);
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index b945d20..f125322 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -403,7 +403,7 @@
}
-static void KeyedStoreGenerateGenericHelper(
+static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
Label transition_smi_elements;
@@ -540,9 +540,8 @@
}
-void KeyedStoreIC::GenerateGeneric(
- MacroAssembler* masm, StrictMode strict_mode,
- KeyedStoreStubCacheRequirement handler_requirement) {
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictMode strict_mode) {
// Return address is on the stack.
Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
@@ -594,12 +593,7 @@
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, rbx, no_reg);
// Cache miss.
- if (handler_requirement == kCallRuntimeOnMissingHandler) {
- __ jmp(&slow_with_tagged_index);
- } else {
- DCHECK(handler_requirement == kMissOnMissingHandler);
- __ jmp(&miss);
- }
+ __ jmp(&miss);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -635,15 +629,14 @@
__ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
__ j(below_equal, &extra);
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
- kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
+ kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength);
- if (handler_requirement == kMissOnMissingHandler) {
- __ bind(&miss);
- GenerateMiss(masm);
- }
+ __ bind(&miss);
+ GenerateMiss(masm);
}
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index 432c55b..959b8b6 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -509,7 +509,7 @@
}
-static void KeyedStoreGenerateGenericHelper(
+static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
Label transition_smi_elements;
@@ -647,9 +647,8 @@
}
-void KeyedStoreIC::GenerateGeneric(
- MacroAssembler* masm, StrictMode strict_mode,
- KeyedStoreStubCacheRequirement handler_requirement) {
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictMode strict_mode) {
// Return address is on the stack.
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
@@ -698,12 +697,7 @@
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
key, ebx, no_reg);
// Cache miss.
- if (handler_requirement == kCallRuntimeOnMissingHandler) {
- __ jmp(&slow);
- } else {
- DCHECK(handler_requirement == kMissOnMissingHandler);
- __ jmp(&miss);
- }
+ __ jmp(&miss);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -742,15 +736,14 @@
__ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
- kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
+ kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+ &fast_double_grow, &slow, kDontCheckMap,
+ kIncrementLength);
- if (handler_requirement == kMissOnMissingHandler) {
- __ bind(&miss);
- GenerateMiss(masm);
- }
+ __ bind(&miss);
+ GenerateMiss(masm);
}
diff --git a/src/isolate.cc b/src/isolate.cc
index 1e935af..59fa038 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -391,7 +391,6 @@
}
DCHECK(cursor + 4 <= elements->length());
-
Handle<Code> code = frames[i].code();
Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
// The stack trace API should not expose receivers and function
@@ -440,30 +439,198 @@
}
+Handle<JSArray> Isolate::GetDetailedStackTrace(Handle<JSObject> error_object) {
+ Handle<Name> key_detailed = factory()->detailed_stack_trace_symbol();
+ Handle<Object> stack_trace =
+ JSObject::GetDataProperty(error_object, key_detailed);
+ if (stack_trace->IsJSArray()) return Handle<JSArray>::cast(stack_trace);
+
+ if (!capture_stack_trace_for_uncaught_exceptions_) return Handle<JSArray>();
+
+ // Try to get details from simple stack trace.
+ Handle<JSArray> detailed_stack_trace =
+ GetDetailedFromSimpleStackTrace(error_object);
+ if (!detailed_stack_trace.is_null()) {
+ // Save the detailed stack since the simple one might be withdrawn later.
+ JSObject::SetProperty(error_object, key_detailed, detailed_stack_trace,
+ STRICT).Assert();
+ }
+ return detailed_stack_trace;
+}
+
+
+class CaptureStackTraceHelper {
+ public:
+ CaptureStackTraceHelper(Isolate* isolate,
+ StackTrace::StackTraceOptions options)
+ : isolate_(isolate) {
+ if (options & StackTrace::kColumnOffset) {
+ column_key_ =
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("column"));
+ }
+ if (options & StackTrace::kLineNumber) {
+ line_key_ =
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("lineNumber"));
+ }
+ if (options & StackTrace::kScriptId) {
+ script_id_key_ =
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptId"));
+ }
+ if (options & StackTrace::kScriptName) {
+ script_name_key_ =
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptName"));
+ }
+ if (options & StackTrace::kScriptNameOrSourceURL) {
+ script_name_or_source_url_key_ = factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("scriptNameOrSourceURL"));
+ }
+ if (options & StackTrace::kFunctionName) {
+ function_key_ = factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("functionName"));
+ }
+ if (options & StackTrace::kIsEval) {
+ eval_key_ =
+ factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("isEval"));
+ }
+ if (options & StackTrace::kIsConstructor) {
+ constructor_key_ = factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("isConstructor"));
+ }
+ }
+
+ Handle<JSObject> NewStackFrameObject(Handle<JSFunction> fun,
+ Handle<Code> code, Address pc,
+ bool is_constructor) {
+ Handle<JSObject> stack_frame =
+ factory()->NewJSObject(isolate_->object_function());
+
+ Handle<Script> script(Script::cast(fun->shared()->script()));
+
+ if (!line_key_.is_null()) {
+ int script_line_offset = script->line_offset()->value();
+ int position = code->SourcePosition(pc);
+ int line_number = Script::GetLineNumber(script, position);
+ // line_number is already shifted by the script_line_offset.
+ int relative_line_number = line_number - script_line_offset;
+ if (!column_key_.is_null() && relative_line_number >= 0) {
+ Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+ int start = (relative_line_number == 0) ? 0 :
+ Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+ int column_offset = position - start;
+ if (relative_line_number == 0) {
+ // For the case where the code is on the same line as the script
+ // tag.
+ column_offset += script->column_offset()->value();
+ }
+ JSObject::AddProperty(stack_frame, column_key_,
+ handle(Smi::FromInt(column_offset + 1), isolate_),
+ NONE);
+ }
+ JSObject::AddProperty(stack_frame, line_key_,
+ handle(Smi::FromInt(line_number + 1), isolate_),
+ NONE);
+ }
+
+ if (!script_id_key_.is_null()) {
+ JSObject::AddProperty(stack_frame, script_id_key_,
+ handle(script->id(), isolate_), NONE);
+ }
+
+ if (!script_name_key_.is_null()) {
+ JSObject::AddProperty(stack_frame, script_name_key_,
+ handle(script->name(), isolate_), NONE);
+ }
+
+ if (!script_name_or_source_url_key_.is_null()) {
+ Handle<Object> result = Script::GetNameOrSourceURL(script);
+ JSObject::AddProperty(stack_frame, script_name_or_source_url_key_, result,
+ NONE);
+ }
+
+ if (!function_key_.is_null()) {
+ Handle<Object> fun_name(fun->shared()->DebugName(), isolate_);
+ JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
+ }
+
+ if (!eval_key_.is_null()) {
+ Handle<Object> is_eval = factory()->ToBoolean(
+ script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
+ JSObject::AddProperty(stack_frame, eval_key_, is_eval, NONE);
+ }
+
+ if (!constructor_key_.is_null()) {
+ Handle<Object> is_constructor_obj = factory()->ToBoolean(is_constructor);
+ JSObject::AddProperty(stack_frame, constructor_key_, is_constructor_obj,
+ NONE);
+ }
+
+ return stack_frame;
+ }
+
+ private:
+ inline Factory* factory() { return isolate_->factory(); }
+
+ Isolate* isolate_;
+ Handle<String> column_key_;
+ Handle<String> line_key_;
+ Handle<String> script_id_key_;
+ Handle<String> script_name_key_;
+ Handle<String> script_name_or_source_url_key_;
+ Handle<String> function_key_;
+ Handle<String> eval_key_;
+ Handle<String> constructor_key_;
+};
+
+
+Handle<JSArray> Isolate::GetDetailedFromSimpleStackTrace(
+ Handle<JSObject> error_object) {
+ Handle<Name> key = factory()->stack_trace_symbol();
+ Handle<Object> property = JSObject::GetDataProperty(error_object, key);
+ if (!property->IsJSArray()) return Handle<JSArray>();
+ Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);
+
+ CaptureStackTraceHelper helper(this,
+ stack_trace_for_uncaught_exceptions_options_);
+
+ int frames_seen = 0;
+ Handle<FixedArray> elements(FixedArray::cast(simple_stack_trace->elements()));
+ int elements_limit = Smi::cast(simple_stack_trace->length())->value();
+
+ int frame_limit = stack_trace_for_uncaught_exceptions_frame_limit_;
+ if (frame_limit < 0) frame_limit = (elements_limit - 1) / 4;
+
+ Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
+ for (int i = 1; i < elements_limit && frames_seen < frame_limit; i += 4) {
+ Handle<Object> recv = handle(elements->get(i), this);
+ Handle<JSFunction> fun =
+ handle(JSFunction::cast(elements->get(i + 1)), this);
+ Handle<Code> code = handle(Code::cast(elements->get(i + 2)), this);
+ Handle<Smi> offset = handle(Smi::cast(elements->get(i + 3)), this);
+ Address pc = code->address() + offset->value();
+ bool is_constructor =
+ recv->IsJSObject() &&
+ Handle<JSObject>::cast(recv)->map()->constructor() == *fun;
+
+ Handle<JSObject> stack_frame =
+ helper.NewStackFrameObject(fun, code, pc, is_constructor);
+
+ FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
+ frames_seen++;
+ }
+
+ stack_trace->set_length(Smi::FromInt(frames_seen));
+ return stack_trace;
+}
+
+
Handle<JSArray> Isolate::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
+ CaptureStackTraceHelper helper(this, options);
+
// Ensure no negative values.
int limit = Max(frame_limit, 0);
Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
- Handle<String> column_key =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("column"));
- Handle<String> line_key =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("lineNumber"));
- Handle<String> script_id_key =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptId"));
- Handle<String> script_name_key =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptName"));
- Handle<String> script_name_or_source_url_key =
- factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("scriptNameOrSourceURL"));
- Handle<String> function_key =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("functionName"));
- Handle<String> eval_key =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("isEval"));
- Handle<String> constructor_key =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("isConstructor"));
-
StackTraceFrameIterator it(this);
int frames_seen = 0;
while (!it.done() && (frames_seen < limit)) {
@@ -478,70 +645,8 @@
if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
!this->context()->HasSameSecurityTokenAs(fun->context())) continue;
- // Create a JSObject to hold the information for the StackFrame.
- Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
-
- Handle<Script> script(Script::cast(fun->shared()->script()));
-
- if (options & StackTrace::kLineNumber) {
- int script_line_offset = script->line_offset()->value();
- int position = frames[i].code()->SourcePosition(frames[i].pc());
- int line_number = Script::GetLineNumber(script, position);
- // line_number is already shifted by the script_line_offset.
- int relative_line_number = line_number - script_line_offset;
- if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- int start = (relative_line_number == 0) ? 0 :
- Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
- int column_offset = position - start;
- if (relative_line_number == 0) {
- // For the case where the code is on the same line as the script
- // tag.
- column_offset += script->column_offset()->value();
- }
- JSObject::AddProperty(
- stack_frame, column_key,
- handle(Smi::FromInt(column_offset + 1), this), NONE);
- }
- JSObject::AddProperty(
- stack_frame, line_key,
- handle(Smi::FromInt(line_number + 1), this), NONE);
- }
-
- if (options & StackTrace::kScriptId) {
- JSObject::AddProperty(
- stack_frame, script_id_key, handle(script->id(), this), NONE);
- }
-
- if (options & StackTrace::kScriptName) {
- JSObject::AddProperty(
- stack_frame, script_name_key, handle(script->name(), this), NONE);
- }
-
- if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> result = Script::GetNameOrSourceURL(script);
- JSObject::AddProperty(
- stack_frame, script_name_or_source_url_key, result, NONE);
- }
-
- if (options & StackTrace::kFunctionName) {
- Handle<Object> fun_name(fun->shared()->DebugName(), this);
- JSObject::AddProperty(stack_frame, function_key, fun_name, NONE);
- }
-
- if (options & StackTrace::kIsEval) {
- Handle<Object> is_eval =
- script->compilation_type() == Script::COMPILATION_TYPE_EVAL ?
- factory()->true_value() : factory()->false_value();
- JSObject::AddProperty(stack_frame, eval_key, is_eval, NONE);
- }
-
- if (options & StackTrace::kIsConstructor) {
- Handle<Object> is_constructor = (frames[i].is_constructor()) ?
- factory()->true_value() : factory()->false_value();
- JSObject::AddProperty(
- stack_frame, constructor_key, is_constructor, NONE);
- }
+ Handle<JSObject> stack_frame = helper.NewStackFrameObject(
+ fun, frames[i].code(), frames[i].pc(), frames[i].is_constructor());
FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
frames_seen++;
@@ -912,9 +1017,7 @@
// Advance to the next JavaScript frame and determine if the
// current frame is the top-level frame.
it.Advance();
- Handle<Object> is_top_level = it.done()
- ? factory()->true_value()
- : factory()->false_value();
+ Handle<Object> is_top_level = factory()->ToBoolean(it.done());
// Generate and print stack trace line.
Handle<String> line =
Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
@@ -1006,17 +1109,11 @@
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception)) {
// We fetch the stack trace that corresponds to this error object.
- Handle<Name> key = factory()->detailed_stack_trace_symbol();
- // Look up as own property. If the lookup fails, the exception is
- // probably not a valid Error object. In that case, we fall through
- // and capture the stack trace at this throw site.
- LookupIterator lookup(exception, key,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- Handle<Object> stack_trace_property;
- if (Object::GetProperty(&lookup).ToHandle(&stack_trace_property) &&
- stack_trace_property->IsJSArray()) {
- stack_trace_object = Handle<JSArray>::cast(stack_trace_property);
- }
+ // If the lookup fails, the exception is probably not a valid Error
+ // object. In that case, we fall through and capture the stack trace
+ // at this throw site.
+ stack_trace_object =
+ GetDetailedStackTrace(Handle<JSObject>::cast(exception));
}
if (stack_trace_object.is_null()) {
// Not an error object, we capture at throw site.
@@ -2271,10 +2368,7 @@
if (promise_reject_callback_ == NULL) return;
Handle<JSArray> stack_trace;
if (event == v8::kPromiseRejectWithNoHandler && value->IsJSObject()) {
- Handle<JSObject> error_obj = Handle<JSObject>::cast(value);
- Handle<Name> key = factory()->detailed_stack_trace_symbol();
- Handle<Object> property = JSObject::GetDataProperty(error_obj, key);
- if (property->IsJSArray()) stack_trace = Handle<JSArray>::cast(property);
+ stack_trace = GetDetailedStackTrace(Handle<JSObject>::cast(value));
}
promise_reject_callback_(v8::PromiseRejectMessage(
v8::Utils::PromiseToLocal(promise), event, v8::Utils::ToLocal(value),
diff --git a/src/isolate.h b/src/isolate.h
index 06bd47a..7944d0e 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -746,6 +746,9 @@
void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object);
void CaptureAndSetSimpleStackTrace(Handle<JSObject> error_object,
Handle<Object> caller);
+ Handle<JSArray> GetDetailedStackTrace(Handle<JSObject> error_object);
+ Handle<JSArray> GetDetailedFromSimpleStackTrace(
+ Handle<JSObject> error_object);
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
diff --git a/src/messages.js b/src/messages.js
index 6578e8d..513ffdc 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -176,7 +176,8 @@
module_export_undefined: ["Export '", "%0", "' is not defined in module"],
unexpected_super: ["'super' keyword unexpected here"],
extends_value_not_a_function: ["Class extends value ", "%0", " is not a function or null"],
- prototype_parent_not_an_object: ["Class extends value does not have valid prototype property ", "%0"]
+ prototype_parent_not_an_object: ["Class extends value does not have valid prototype property ", "%0"],
+ duplicate_constructor: ["A class may only have one constructor"]
};
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 6760b3c..e2064da 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -2482,6 +2482,74 @@
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in v0.
+ DCHECK(lit != NULL);
+ __ push(v0);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = a1;
+ __ lw(scratch,
+ FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ lw(scratch, MemOperand(sp, kPointerSize)); // constructor
+ } else {
+ __ lw(scratch, MemOperand(sp, 0)); // prototype
+ }
+ __ push(scratch);
+ VisitForStackValue(key);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ VisitForStackValue(value);
+ __ li(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ VisitForStackValue(value);
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ push(scratch);
+ __ li(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ push(scratch);
+ VisitForStackValue(value);
+ __ li(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
@@ -2919,13 +2987,16 @@
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // t2: copy of the first argument or undefined if it doesn't exist.
+ // t3: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ lw(t2, MemOperand(sp, arg_count * kPointerSize));
+ __ lw(t3, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
}
+ // t2: the receiver of the enclosing function.
+ __ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
// t1: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
@@ -2937,8 +3008,9 @@
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
+ __ Push(t3);
__ Push(t2, t1, t0, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
diff --git a/src/mips64/full-codegen-mips64.cc b/src/mips64/full-codegen-mips64.cc
index 0aae037..e653326 100644
--- a/src/mips64/full-codegen-mips64.cc
+++ b/src/mips64/full-codegen-mips64.cc
@@ -2479,6 +2479,74 @@
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in v0.
+ DCHECK(lit != NULL);
+ __ push(v0);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = a1;
+ __ ld(scratch,
+ FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ ld(scratch, MemOperand(sp, kPointerSize)); // constructor
+ } else {
+ __ ld(scratch, MemOperand(sp, 0)); // prototype
+ }
+ __ push(scratch);
+ VisitForStackValue(key);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ VisitForStackValue(value);
+ __ li(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ VisitForStackValue(value);
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ push(scratch);
+ __ li(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ push(scratch);
+ VisitForStackValue(value);
+ __ li(scratch, Operand(Smi::FromInt(NONE)));
+ __ push(scratch);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
@@ -2918,13 +2986,16 @@
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // a6: copy of the first argument or undefined if it doesn't exist.
+ // a7: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ ld(a6, MemOperand(sp, arg_count * kPointerSize));
+ __ ld(a7, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a7, Heap::kUndefinedValueRootIndex);
}
+ // a6: the receiver of the enclosing function.
+ __ ld(a6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
// a5: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ld(a5, MemOperand(fp, receiver_offset * kPointerSize));
@@ -2936,8 +3007,9 @@
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
+ __ Push(a7);
__ Push(a6, a5, a4, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
diff --git a/src/objects.cc b/src/objects.cc
index 3a4b9c9..c0814a8 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -14845,13 +14845,13 @@
}
-Handle<Object> CompilationCacheTable::LookupEval(Handle<String> src,
- Handle<Context> context,
- StrictMode strict_mode,
- int scope_position) {
+Handle<Object> CompilationCacheTable::LookupEval(
+ Handle<String> src, Handle<SharedFunctionInfo> outer_info,
+ StrictMode strict_mode, int scope_position) {
Isolate* isolate = GetIsolate();
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- StringSharedKey key(src, shared, strict_mode, scope_position);
+ // Cache key is the tuple (source, outer shared function info, scope position)
+ // to unambiguously identify the context chain the cached eval code assumes.
+ StringSharedKey key(src, outer_info, strict_mode, scope_position);
int entry = FindEntry(&key);
if (entry == kNotFound) return isolate->factory()->undefined_value();
return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
@@ -14888,11 +14888,10 @@
Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> context, Handle<SharedFunctionInfo> value,
+ Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
int scope_position) {
Isolate* isolate = cache->GetIsolate();
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- StringSharedKey key(src, shared, value->strict_mode(), scope_position);
+ StringSharedKey key(src, outer_info, value->strict_mode(), scope_position);
cache = EnsureCapacity(cache, 1, &key);
Handle<Object> k = key.AsHandle(isolate);
int entry = cache->FindInsertionEntry(key.Hash());
diff --git a/src/objects.h b/src/objects.h
index fbe06c7..f6c4d19 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -7943,15 +7943,16 @@
public:
// Find cached value for a string key, otherwise return null.
Handle<Object> Lookup(Handle<String> src, Handle<Context> context);
- Handle<Object> LookupEval(Handle<String> src, Handle<Context> context,
- StrictMode strict_mode, int scope_position);
+ Handle<Object> LookupEval(Handle<String> src,
+ Handle<SharedFunctionInfo> shared,
+ StrictMode strict_mode, int scope_position);
Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
static Handle<CompilationCacheTable> Put(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> context, Handle<Object> value);
static Handle<CompilationCacheTable> PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> context, Handle<SharedFunctionInfo> value,
+ Handle<SharedFunctionInfo> context, Handle<SharedFunctionInfo> value,
int scope_position);
static Handle<CompilationCacheTable> PutRegExp(
Handle<CompilationCacheTable> cache, Handle<String> src,
diff --git a/src/preparser.h b/src/preparser.h
index a53abfe..2db88e7 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -476,6 +476,7 @@
ExpressionT ParseObjectLiteral(bool* ok);
ObjectLiteralPropertyT ParsePropertyDefinition(ObjectLiteralChecker* checker,
bool in_class, bool is_static,
+ bool* has_seen_constructor,
bool* ok);
typename Traits::Type::ExpressionList ParseArguments(bool* ok);
ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
@@ -1183,10 +1184,7 @@
return false;
}
- bool IsConstructorProperty(PreParserExpression property) { return false; }
-
static PreParserExpression GetPropertyValue(PreParserExpression property) {
- UNREACHABLE();
return PreParserExpression::Default();
}
@@ -1925,7 +1923,9 @@
template <class Traits>
typename ParserBase<Traits>::ObjectLiteralPropertyT ParserBase<
Traits>::ParsePropertyDefinition(ObjectLiteralChecker* checker,
- bool in_class, bool is_static, bool* ok) {
+ bool in_class, bool is_static,
+ bool* has_seen_constructor, bool* ok) {
+ DCHECK(!in_class || is_static || has_seen_constructor != NULL);
ExpressionT value = this->EmptyExpression();
bool is_get = false;
bool is_set = false;
@@ -1942,8 +1942,10 @@
if (!in_class && !is_generator && peek() == Token::COLON) {
// PropertyDefinition : PropertyName ':' AssignmentExpression
- checker->CheckProperty(name_token, kValueProperty,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ if (checker != NULL) {
+ checker->CheckProperty(name_token, kValueProperty,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ }
Consume(Token::COLON);
value = this->ParseAssignmentExpression(
true, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
@@ -1968,11 +1970,20 @@
return this->EmptyObjectLiteralProperty();
}
+ if (*has_seen_constructor) {
+ ReportMessageAt(scanner()->location(), "duplicate_constructor");
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+
+ *has_seen_constructor = true;
kind = FunctionKind::kNormalFunction;
}
- checker->CheckProperty(name_token, kValueProperty,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ if (checker != NULL) {
+ checker->CheckProperty(name_token, kValueProperty,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ }
value = this->ParseFunctionLiteral(
name, scanner()->location(),
@@ -1983,7 +1994,7 @@
} else if (in_class && name_is_static && !is_static) {
// static MethodDefinition
- return ParsePropertyDefinition(checker, true, true, ok);
+ return ParsePropertyDefinition(checker, true, true, NULL, ok);
} else if (is_get || is_set) {
// Accessor
@@ -1998,16 +2009,15 @@
*ok = false;
return this->EmptyObjectLiteralProperty();
} else if (in_class && !is_static && this->IsConstructor(name)) {
- // ES6, spec draft rev 27, treats static get constructor as an error too.
- // https://bugs.ecmascript.org/show_bug.cgi?id=3223
- // TODO(arv): Update when bug is resolved.
ReportMessageAt(scanner()->location(), "constructor_special_method");
*ok = false;
return this->EmptyObjectLiteralProperty();
}
- checker->CheckProperty(name_token,
- is_get ? kGetterProperty : kSetterProperty,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ if (checker != NULL) {
+ checker->CheckProperty(name_token,
+ is_get ? kGetterProperty : kSetterProperty,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ }
typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
name, scanner()->location(),
@@ -2061,8 +2071,8 @@
const bool in_class = false;
const bool is_static = false;
- ObjectLiteralPropertyT property =
- this->ParsePropertyDefinition(&checker, in_class, is_static, CHECK_OK);
+ ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
+ &checker, in_class, is_static, NULL, CHECK_OK);
// Mark top-level object literals that contain function literals and
// pretenure the literal so it can be added as a constant function
@@ -2744,22 +2754,22 @@
scope_->SetStrictMode(STRICT);
scope_->SetScopeName(name);
- ObjectLiteralChecker checker(this, STRICT);
typename Traits::Type::PropertyList properties =
this->NewPropertyList(4, zone_);
ExpressionT constructor = this->EmptyExpression();
+ bool has_seen_constructor = false;
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
if (Check(Token::SEMICOLON)) continue;
if (fni_ != NULL) fni_->Enter();
-
const bool in_class = true;
const bool is_static = false;
- ObjectLiteralPropertyT property =
- this->ParsePropertyDefinition(&checker, in_class, is_static, CHECK_OK);
+ bool old_has_seen_constructor = has_seen_constructor;
+ ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
+ NULL, in_class, is_static, &has_seen_constructor, CHECK_OK);
- if (this->IsConstructorProperty(property)) {
+ if (has_seen_constructor != old_has_seen_constructor) {
constructor = this->GetPropertyValue(property);
} else {
properties->Add(property, zone());
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index 94cd343..2e806fa 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -369,16 +369,18 @@
ParseRestriction restriction = function_literal_only
? ONLY_SINGLE_FUNCTION_LITERAL
: NO_PARSE_RESTRICTION;
+ Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
Handle<JSFunction> fun;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, fun,
- Compiler::GetFunctionFromEval(source, context, SLOPPY, restriction,
- RelocInfo::kNoPosition));
+ Compiler::GetFunctionFromEval(source, outer_info, context, SLOPPY,
+ restriction, RelocInfo::kNoPosition));
return *fun;
}
static ObjectPair CompileGlobalEval(Isolate* isolate, Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
Handle<Object> receiver,
StrictMode strict_mode,
int scope_position) {
@@ -404,8 +406,8 @@
Handle<JSFunction> compiled;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, compiled,
- Compiler::GetFunctionFromEval(source, context, strict_mode, restriction,
- scope_position),
+ Compiler::GetFunctionFromEval(source, outer_info, context, strict_mode,
+ restriction, scope_position),
MakePair(isolate->heap()->exception(), NULL));
return MakePair(*compiled, *receiver);
}
@@ -413,7 +415,7 @@
RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ResolvePossiblyDirectEval) {
HandleScope scope(isolate);
- DCHECK(args.length() == 5);
+ DCHECK(args.length() == 6);
Handle<Object> callee = args.at<Object>(0);
@@ -427,12 +429,14 @@
return MakePair(*callee, isolate->heap()->undefined_value());
}
- DCHECK(args[3]->IsSmi());
- DCHECK(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT);
- StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(3));
DCHECK(args[4]->IsSmi());
- return CompileGlobalEval(isolate, args.at<String>(1), args.at<Object>(2),
- strict_mode, args.smi_at(4));
+ DCHECK(args.smi_at(4) == SLOPPY || args.smi_at(4) == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(4));
+ DCHECK(args[5]->IsSmi());
+ Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
+ isolate);
+ return CompileGlobalEval(isolate, args.at<String>(1), outer_info,
+ args.at<Object>(3), strict_mode, args.smi_at(5));
}
}
} // namespace v8::internal
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index 969ddaf..1555106 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -2043,6 +2043,7 @@
// Compile and evaluate source for the given context.
static MaybeHandle<Object> DebugEvaluate(Isolate* isolate,
+ Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
Handle<Object> context_extension,
Handle<Object> receiver,
@@ -2054,11 +2055,11 @@
}
Handle<JSFunction> eval_fun;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, eval_fun, Compiler::GetFunctionFromEval(source, context, SLOPPY,
- NO_PARSE_RESTRICTION,
- RelocInfo::kNoPosition),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, eval_fun,
+ Compiler::GetFunctionFromEval(
+ source, outer_info, context, SLOPPY,
+ NO_PARSE_RESTRICTION, RelocInfo::kNoPosition),
+ Object);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -2118,6 +2119,7 @@
JavaScriptFrame* frame = it.frame();
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+ Handle<SharedFunctionInfo> outer_info(function->shared());
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -2177,8 +2179,8 @@
}
Handle<Object> receiver(frame->receiver(), isolate);
- MaybeHandle<Object> maybe_result =
- DebugEvaluate(isolate, eval_context, context_extension, receiver, source);
+ MaybeHandle<Object> maybe_result = DebugEvaluate(
+ isolate, outer_info, eval_context, context_extension, receiver, source);
// Remove with-context if it was inserted in between.
if (!inner_context.is_null()) inner_context->set_previous(*function_context);
@@ -2224,10 +2226,11 @@
// debugger was invoked.
Handle<Context> context = isolate->native_context();
Handle<JSObject> receiver(context->global_proxy());
+ Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- DebugEvaluate(isolate, context, context_extension, receiver, source));
+ isolate, result, DebugEvaluate(isolate, outer_info, context,
+ context_extension, receiver, source));
return *result;
}
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 657a17d..5217f62 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -521,7 +521,7 @@
#define RUNTIME_FUNCTION_LIST_RETURN_PAIR(F) \
F(LoadLookupSlot, 2, 2) \
F(LoadLookupSlotNoReferenceError, 2, 2) \
- F(ResolvePossiblyDirectEval, 5, 2) \
+ F(ResolvePossiblyDirectEval, 6, 2) \
F(ForInInit, 2, 2) /* TODO(turbofan): Only temporary */ \
F(ForInNext, 4, 2) /* TODO(turbofan): Only temporary */
diff --git a/src/type-feedback-vector.cc b/src/type-feedback-vector.cc
index c0078c5..676290c 100644
--- a/src/type-feedback-vector.cc
+++ b/src/type-feedback-vector.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/ic/ic.h"
#include "src/ic/ic-state.h"
#include "src/objects.h"
#include "src/type-feedback-vector-inl.h"
@@ -151,9 +152,142 @@
FeedbackVectorICSlot slot(i);
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- ICUtility::Clear(isolate, Code::CALL_IC, host, this, slot);
+ // TODO(mvstanton): To make this code work with --vector-ics,
+ // additional Nexus types must be created.
+ DCHECK(!FLAG_vector_ics);
+ DCHECK(GetKind(slot) == Code::CALL_IC);
+ CallICNexus nexus(this, slot);
+ ICUtility::Clear(isolate, Code::CALL_IC, host, &nexus);
}
}
}
+
+
+Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
+ Isolate* isolate = GetIsolate();
+ Handle<Object> feedback = handle(GetFeedback(), isolate);
+ if (!feedback->IsFixedArray() ||
+ FixedArray::cast(*feedback)->length() != length) {
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+ SetFeedback(*array);
+ return array;
+ }
+ return Handle<FixedArray>::cast(feedback);
+}
+
+
+void FeedbackNexus::InstallHandlers(int start_index, TypeHandleList* types,
+ CodeHandleList* handlers) {
+ Isolate* isolate = GetIsolate();
+ FixedArray* array = FixedArray::cast(GetFeedback());
+ int receiver_count = types->length();
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate);
+ array->set(start_index + (current * 2), *map);
+ array->set(start_index + (current * 2 + 1), *handlers->at(current));
+ }
+}
+
+
+InlineCacheState CallICNexus::StateFromFeedback() const {
+ Isolate* isolate = GetIsolate();
+ InlineCacheState state = UNINITIALIZED;
+ Object* feedback = GetFeedback();
+
+ if (feedback == *vector()->MegamorphicSentinel(isolate)) {
+ state = GENERIC;
+ } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
+ state = MONOMORPHIC;
+ } else {
+ CHECK(feedback == *vector()->UninitializedSentinel(isolate));
+ }
+
+ return state;
+}
+
+
+void CallICNexus::ConfigureGeneric() {
+ SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
+}
+
+
+void CallICNexus::ConfigureMonomorphicArray() {
+ Object* feedback = GetFeedback();
+ if (!feedback->IsAllocationSite()) {
+ Handle<AllocationSite> new_site =
+ GetIsolate()->factory()->NewAllocationSite();
+ SetFeedback(*new_site);
+ }
+}
+
+
+void CallICNexus::ConfigureUninitialized() {
+ SetFeedback(*vector()->UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+}
+
+
+void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
+ SetFeedback(*function);
+}
+
+
+int FeedbackNexus::ExtractMaps(int start_index, MapHandleList* maps) const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+ if (feedback->IsFixedArray()) {
+ FixedArray* array = FixedArray::cast(feedback);
+ // The array should be of the form [<optional name>], then
+ // [map, handler, map, handler, ... ]
+ DCHECK(array->length() >= (2 + start_index));
+ for (int i = start_index; i < array->length(); i += 2) {
+ Map* map = Map::cast(array->get(i));
+ maps->Add(handle(map, isolate));
+ }
+ return (array->length() - start_index) / 2;
+ }
+
+ return 0;
+}
+
+
+MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(int start_index,
+ Handle<Map> map) const {
+ Object* feedback = GetFeedback();
+ if (feedback->IsFixedArray()) {
+ FixedArray* array = FixedArray::cast(feedback);
+ for (int i = start_index; i < array->length(); i += 2) {
+ Map* array_map = Map::cast(array->get(i));
+ if (array_map == *map) {
+ Code* code = Code::cast(array->get(i + 1));
+ DCHECK(code->kind() == Code::HANDLER);
+ return handle(code);
+ }
+ }
+ }
+
+ return MaybeHandle<Code>();
+}
+
+
+bool FeedbackNexus::FindHandlers(int start_index, CodeHandleList* code_list,
+ int length) const {
+ Object* feedback = GetFeedback();
+ int count = 0;
+ if (feedback->IsFixedArray()) {
+ FixedArray* array = FixedArray::cast(feedback);
+ // The array should be of the form [<optional name>], then
+ // [map, handler, map, handler, ... ]
+ DCHECK(array->length() >= (2 + start_index));
+ for (int i = start_index; i < array->length(); i += 2) {
+ Code* code = Code::cast(array->get(i + 1));
+ DCHECK(code->kind() == Code::HANDLER);
+ code_list->Add(handle(code));
+ count++;
+ }
+ }
+ return count == length;
+}
}
} // namespace v8::internal
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
index 61463b0..de5a7e1 100644
--- a/src/type-feedback-vector.h
+++ b/src/type-feedback-vector.h
@@ -173,6 +173,102 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
};
+
+
+// A FeedbackNexus is the combination of a TypeFeedbackVector and a slot.
+// Derived classes customize the update and retrieval of feedback.
+class FeedbackNexus {
+ public:
+ FeedbackNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ : vector_handle_(vector), vector_(NULL), slot_(slot) {}
+ FeedbackNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : vector_(vector), slot_(slot) {}
+ virtual ~FeedbackNexus() {}
+
+ Handle<TypeFeedbackVector> vector_handle() const {
+ DCHECK(vector_ == NULL);
+ return vector_handle_;
+ }
+ TypeFeedbackVector* vector() const {
+ return vector_handle_.is_null() ? vector_ : *vector_handle_;
+ }
+ FeedbackVectorICSlot slot() const { return slot_; }
+
+ InlineCacheState ic_state() const { return StateFromFeedback(); }
+ Map* FindFirstMap() const {
+ MapHandleList maps;
+ ExtractMaps(&maps);
+ if (maps.length() > 0) return *maps.at(0);
+ return NULL;
+ }
+
+ virtual InlineCacheState StateFromFeedback() const = 0;
+ virtual int ExtractMaps(MapHandleList* maps) const = 0;
+ virtual MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const = 0;
+ virtual bool FindHandlers(CodeHandleList* code_list, int length = -1) const {
+ return length == 0;
+ }
+ virtual Name* FindFirstName() const { return NULL; }
+
+ Object* GetFeedback() const { return vector()->Get(slot()); }
+
+ protected:
+ Isolate* GetIsolate() const { return vector()->GetIsolate(); }
+
+ void SetFeedback(Object* feedback,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
+ vector()->Set(slot(), feedback, mode);
+ }
+
+ Handle<FixedArray> EnsureArrayOfSize(int length);
+ void InstallHandlers(int start_index, TypeHandleList* types,
+ CodeHandleList* handlers);
+ int ExtractMaps(int start_index, MapHandleList* maps) const;
+ MaybeHandle<Code> FindHandlerForMap(int start_index, Handle<Map> map) const;
+ bool FindHandlers(int start_index, CodeHandleList* code_list,
+ int length) const;
+
+ private:
+ // The reason for having a vector handle and a raw pointer is that we can and
+ // should use handles during IC miss, but not during GC when we clear ICs. If
+ // you have a handle to the vector that is better because more operations can
+ // be done, like allocation.
+ Handle<TypeFeedbackVector> vector_handle_;
+ TypeFeedbackVector* vector_;
+ FeedbackVectorICSlot slot_;
+};
+
+
+class CallICNexus : public FeedbackNexus {
+ public:
+ CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->GetKind(slot) == Code::CALL_IC);
+ }
+ CallICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->GetKind(slot) == Code::CALL_IC);
+ }
+
+ void ConfigureUninitialized();
+ void ConfigureGeneric();
+ void ConfigureMonomorphicArray();
+ void ConfigureMonomorphic(Handle<JSFunction> function);
+
+ virtual InlineCacheState StateFromFeedback() const OVERRIDE;
+
+ virtual int ExtractMaps(MapHandleList* maps) const OVERRIDE {
+ // CallICs don't record map feedback.
+ return 0;
+ }
+ virtual MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const OVERRIDE {
+ return MaybeHandle<Code>();
+ }
+ virtual bool FindHandlers(CodeHandleList* code_list,
+ int length = -1) const OVERRIDE {
+ return length == 0;
+ }
+};
}
} // namespace v8::internal
diff --git a/src/version.cc b/src/version.cc
index 0e20eb2..c4b5007 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 30
-#define BUILD_NUMBER 20
+#define BUILD_NUMBER 21
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 4229f44..fe06fc7 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -2420,6 +2420,67 @@
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in rax.
+ DCHECK(lit != NULL);
+ __ Push(rax);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = rbx;
+ __ movp(scratch, FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ Push(Operand(rsp, kPointerSize)); // constructor
+ } else {
+ __ Push(Operand(rsp, 0)); // prototype
+ }
+ VisitForStackValue(key);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ VisitForStackValue(value);
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ VisitForStackValue(value);
+ __ Push(isolate()->factory()->null_value());
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ Push(isolate()->factory()->null_value());
+ VisitForStackValue(value);
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
@@ -2855,6 +2916,9 @@
__ PushRoot(Heap::kUndefinedValueRootIndex);
}
+ // Push the enclosing function.
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+
// Push the receiver of the enclosing function and do runtime call.
StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
__ Push(args.GetReceiverOperand());
@@ -2866,7 +2930,7 @@
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
diff --git a/src/x87/full-codegen-x87.cc b/src/x87/full-codegen-x87.cc
index 9240fc8..1cc8caa 100644
--- a/src/x87/full-codegen-x87.cc
+++ b/src/x87/full-codegen-x87.cc
@@ -2408,6 +2408,67 @@
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in eax.
+ DCHECK(lit != NULL);
+ __ push(eax);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = ebx;
+ __ mov(scratch, FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ push(Operand(esp, kPointerSize)); // constructor
+ } else {
+ __ push(Operand(esp, 0)); // prototype
+ }
+ VisitForStackValue(key);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ VisitForStackValue(value);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ VisitForStackValue(value);
+ __ push(Immediate(isolate()->factory()->null_value()));
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ push(Immediate(isolate()->factory()->null_value()));
+ VisitForStackValue(value);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
@@ -2844,6 +2905,8 @@
__ push(Immediate(isolate()->factory()->undefined_value()));
}
+ // Push the enclosing function.
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the language mode.
@@ -2853,7 +2916,7 @@
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index 26ba0e2..1dcd35b 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -65,6 +65,7 @@
'compiler/test-js-constant-cache.cc',
'compiler/test-js-typed-lowering.cc',
'compiler/test-linkage.cc',
+ 'compiler/test-loop-assignment-analysis.cc',
'compiler/test-machine-operator-reducer.cc',
'compiler/test-node-algorithm.cc',
'compiler/test-node-cache.cc',
diff --git a/test/cctest/compiler/function-tester.h b/test/cctest/compiler/function-tester.h
index eb6bd49..600f6a3 100644
--- a/test/cctest/compiler/function-tester.h
+++ b/test/cctest/compiler/function-tester.h
@@ -165,9 +165,7 @@
if (flags_ & CompilationInfo::kTypingEnabled) {
info.MarkAsTypingEnabled();
}
- CHECK(Rewriter::Rewrite(&info));
- CHECK(Scope::Analyze(&info));
- CHECK(AstNumbering::Renumber(info.function(), info.zone()));
+ CHECK(Compiler::Analyze(&info));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
Pipeline pipeline(&info);
@@ -215,9 +213,7 @@
CHECK(Parser::Parse(&info));
info.SetOptimizing(BailoutId::None(),
Handle<Code>(function->shared()->code()));
- CHECK(Rewriter::Rewrite(&info));
- CHECK(Scope::Analyze(&info));
- CHECK(AstNumbering::Renumber(info.function(), info.zone()));
+ CHECK(Compiler::Analyze(&info));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
Pipeline pipeline(&info);
diff --git a/test/cctest/compiler/test-codegen-deopt.cc b/test/cctest/compiler/test-codegen-deopt.cc
index 799fa94..f01b0e1 100644
--- a/test/cctest/compiler/test-codegen-deopt.cc
+++ b/test/cctest/compiler/test-codegen-deopt.cc
@@ -47,9 +47,7 @@
bailout_id(-1) {
CHECK(Parser::Parse(&info));
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
- CHECK(Rewriter::Rewrite(&info));
- CHECK(Scope::Analyze(&info));
- CHECK(AstNumbering::Renumber(info.function(), info.zone()));
+ CHECK(Compiler::Analyze(&info));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
DCHECK(info.shared_info()->has_deoptimization_support());
diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc
index d48a462..8a78c93 100644
--- a/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/test/cctest/compiler/test-js-typed-lowering.cc
@@ -508,8 +508,12 @@
{ // ToBoolean(string)
Node* r = R.ReduceUnop(op, Type::String());
- // TODO(titzer): test will break with better js-typed-lowering
- CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+ CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+ Node* i = r->InputAt(0);
+ CHECK_EQ(IrOpcode::kNumberEqual, i->opcode());
+ Node* j = i->InputAt(0);
+ CHECK_EQ(IrOpcode::kLoadField, j->opcode());
+ // ToBoolean(x:string) => BooleanNot(NumberEqual(x.length, #0))
}
{ // ToBoolean(object)
diff --git a/test/cctest/compiler/test-loop-assignment-analysis.cc b/test/cctest/compiler/test-loop-assignment-analysis.cc
new file mode 100644
index 0000000..aabd95b
--- /dev/null
+++ b/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -0,0 +1,294 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/ast-loop-assignment-analyzer.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+namespace {
+const int kBufferSize = 1024;
+
+struct TestHelper : public HandleAndZoneScope {
+ Handle<JSFunction> function;
+ LoopAssignmentAnalysis* result;
+
+ explicit TestHelper(const char* body)
+ : function(Handle<JSFunction>::null()), result(NULL) {
+ ScopedVector<char> program(kBufferSize);
+ SNPrintF(program, "function f(a,b,c) { %s; } f;", body);
+ v8::Local<v8::Value> v = CompileRun(program.start());
+ Handle<Object> obj = v8::Utils::OpenHandle(*v);
+ function = Handle<JSFunction>::cast(obj);
+ }
+
+ void CheckLoopAssignedCount(int expected, const char* var_name) {
+ // TODO(titzer): don't scope analyze every single time.
+ CompilationInfo info(function, main_zone());
+
+ CHECK(Parser::Parse(&info));
+ CHECK(Rewriter::Rewrite(&info));
+ CHECK(Scope::Analyze(&info));
+
+ Scope* scope = info.function()->scope();
+ AstValueFactory* factory = info.ast_value_factory();
+ CHECK_NE(NULL, scope);
+
+ if (result == NULL) {
+ AstLoopAssignmentAnalyzer analyzer(main_zone(), &info);
+ result = analyzer.Analyze();
+ CHECK_NE(NULL, result);
+ }
+
+ const i::AstRawString* name = factory->GetOneByteString(var_name);
+
+ i::Variable* var = scope->Lookup(name);
+ CHECK_NE(NULL, var);
+
+ if (var->location() == Variable::UNALLOCATED) {
+ CHECK_EQ(0, expected);
+ } else {
+ CHECK(var->IsStackAllocated());
+ CHECK_EQ(expected, result->GetAssignmentCountForTesting(scope, var));
+ }
+ }
+};
+}  // namespace
+
+
+TEST(SimpleLoop1) {
+ TestHelper f("var x = 0; while (x) ;");
+
+ f.CheckLoopAssignedCount(0, "x");
+}
+
+
+TEST(SimpleLoop2) {
+ const char* loops[] = {
+ "while (x) { var x = 0; }", "for(;;) { var x = 0; }",
+ "for(;x;) { var x = 0; }", "for(;x;x) { var x = 0; }",
+ "for(var i = x; x; x) { var x = 0; }", "for(y in 0) { var x = 0; }",
+ "for(y of 0) { var x = 0; }", "for(var x = 0; x; x++) { }",
+ "for(var x = 0; x++;) { }", "var x; for(;x;x++) { }",
+ "var x; do { x = 1; } while (0);", "do { var x = 1; } while (0);"};
+
+ for (size_t i = 0; i < arraysize(loops); i++) {
+ TestHelper f(loops[i]);
+ f.CheckLoopAssignedCount(1, "x");
+ }
+}
+
+
+TEST(ForInOf1) {
+ const char* loops[] = {
+ "for(x in 0) { }", "for(x of 0) { }",
+ };
+
+ for (size_t i = 0; i < arraysize(loops); i++) {
+ TestHelper f(loops[i]);
+ f.CheckLoopAssignedCount(0, "x");
+ }
+}
+
+
+TEST(Param1) {
+ TestHelper f("while (1) a = 0;");
+
+ f.CheckLoopAssignedCount(1, "a");
+ f.CheckLoopAssignedCount(0, "b");
+ f.CheckLoopAssignedCount(0, "c");
+}
+
+
+TEST(Param2) {
+ TestHelper f("for (;;) b = 0;");
+
+ f.CheckLoopAssignedCount(0, "a");
+ f.CheckLoopAssignedCount(1, "b");
+ f.CheckLoopAssignedCount(0, "c");
+}
+
+
+TEST(Param2b) {
+ TestHelper f("a; b; c; for (;;) b = 0;");
+
+ f.CheckLoopAssignedCount(0, "a");
+ f.CheckLoopAssignedCount(1, "b");
+ f.CheckLoopAssignedCount(0, "c");
+}
+
+
+TEST(Param3) {
+ TestHelper f("for(x in 0) c = 0;");
+
+ f.CheckLoopAssignedCount(0, "a");
+ f.CheckLoopAssignedCount(0, "b");
+ f.CheckLoopAssignedCount(1, "c");
+}
+
+
+TEST(Param3b) {
+ TestHelper f("a; b; c; for(x in 0) c = 0;");
+
+ f.CheckLoopAssignedCount(0, "a");
+ f.CheckLoopAssignedCount(0, "b");
+ f.CheckLoopAssignedCount(1, "c");
+}
+
+
+TEST(NestedLoop1) {
+ TestHelper f("while (x) { while (x) { var x = 0; } }");
+
+ f.CheckLoopAssignedCount(2, "x");
+}
+
+
+TEST(NestedLoop2) {
+ TestHelper f("while (0) { while (0) { var x = 0; } }");
+
+ f.CheckLoopAssignedCount(2, "x");
+}
+
+
+TEST(NestedLoop3) {
+ TestHelper f("while (0) { var y = 1; while (0) { var x = 0; } }");
+
+ f.CheckLoopAssignedCount(2, "x");
+ f.CheckLoopAssignedCount(1, "y");
+}
+
+
+TEST(NestedInc1) {
+ const char* loops[] = {
+ "while (1) a(b++);",
+ "while (1) a(0, b++);",
+ "while (1) a(0, 0, b++);",
+ "while (1) a(b++, 1, 1);",
+ "while (1) a(++b);",
+ "while (1) a + (b++);",
+ "while (1) (b++) + a;",
+ "while (1) a + c(b++);",
+ "while (1) throw b++;",
+ "while (1) switch (b++) {} ;",
+ "while (1) switch (a) {case (b++): 0; } ;",
+ "while (1) switch (a) {case b: b++; } ;",
+ "while (1) a == (b++);",
+ "while (1) a === (b++);",
+ "while (1) +(b++);",
+ "while (1) ~(b++);",
+ "while (1) new a(b++);",
+ "while (1) (b++).f;",
+ "while (1) a[b++];",
+ "while (1) (b++)();",
+ "while (1) [b++];",
+ "while (1) [0,b++];",
+ "while (1) var y = [11,b++,12];",
+ "while (1) var y = {f:11,g:(b++),h:12};",
+ "while (1) try {b++;} finally {};",
+ "while (1) try {} finally {b++};",
+ "while (1) try {b++;} catch (e) {};",
+ "while (1) try {} catch (e) {b++};",
+ "while (1) return b++;",
+ "while (1) (b++) ? b : b;",
+ "while (1) b ? (b++) : b;",
+ "while (1) b ? b : (b++);",
+ };
+
+ for (size_t i = 0; i < arraysize(loops); i++) {
+ TestHelper f(loops[i]);
+ f.CheckLoopAssignedCount(1, "b");
+ }
+}
+
+
+TEST(NestedAssign1) {
+ const char* loops[] = {
+ "while (1) a(b=1);",
+ "while (1) a(0, b=1);",
+ "while (1) a(0, 0, b=1);",
+ "while (1) a(b=1, 1, 1);",
+ "while (1) a + (b=1);",
+ "while (1) (b=1) + a;",
+ "while (1) a + c(b=1);",
+ "while (1) throw b=1;",
+ "while (1) switch (b=1) {} ;",
+ "while (1) switch (a) {case b=1: 0; } ;",
+ "while (1) switch (a) {case b: b=1; } ;",
+ "while (1) a == (b=1);",
+ "while (1) a === (b=1);",
+ "while (1) +(b=1);",
+ "while (1) ~(b=1);",
+ "while (1) new a(b=1);",
+ "while (1) (b=1).f;",
+ "while (1) a[b=1];",
+ "while (1) (b=1)();",
+ "while (1) [b=1];",
+ "while (1) [0,b=1];",
+ "while (1) var z = [11,b=1,12];",
+ "while (1) var y = {f:11,g:(b=1),h:12};",
+ "while (1) try {b=1;} finally {};",
+ "while (1) try {} finally {b=1};",
+ "while (1) try {b=1;} catch (e) {};",
+ "while (1) try {} catch (e) {b=1};",
+ "while (1) return b=1;",
+ "while (1) (b=1) ? b : b;",
+ "while (1) b ? (b=1) : b;",
+ "while (1) b ? b : (b=1);",
+ };
+
+ for (size_t i = 0; i < arraysize(loops); i++) {
+ TestHelper f(loops[i]);
+ f.CheckLoopAssignedCount(1, "b");
+ }
+}
+
+
+TEST(NestedLoops3) {
+ TestHelper f("var x, y, z, w; while (x++) while (y++) while (z++) ; w;");
+
+ f.CheckLoopAssignedCount(1, "x");
+ f.CheckLoopAssignedCount(2, "y");
+ f.CheckLoopAssignedCount(3, "z");
+ f.CheckLoopAssignedCount(0, "w");
+}
+
+
+TEST(NestedLoops3b) {
+ TestHelper f(
+ "var x, y, z, w;"
+ "while (1) { x=1; while (1) { y=1; while (1) z=1; } }"
+ "w;");
+
+ f.CheckLoopAssignedCount(1, "x");
+ f.CheckLoopAssignedCount(2, "y");
+ f.CheckLoopAssignedCount(3, "z");
+ f.CheckLoopAssignedCount(0, "w");
+}
+
+
+TEST(NestedLoops3c) {
+ TestHelper f(
+ "var x, y, z, w;"
+ "while (1) {"
+ " x++;"
+ " while (1) {"
+ " y++;"
+ " while (1) z++;"
+ " }"
+ " while (1) {"
+ " y++;"
+ " while (1) z++;"
+ " }"
+ "}"
+ "w;");
+
+ f.CheckLoopAssignedCount(1, "x");
+ f.CheckLoopAssignedCount(3, "y");
+ f.CheckLoopAssignedCount(5, "z");
+ f.CheckLoopAssignedCount(0, "w");
+}
diff --git a/test/cctest/compiler/test-pipeline.cc b/test/cctest/compiler/test-pipeline.cc
index 9d8a2d1..98b0bae 100644
--- a/test/cctest/compiler/test-pipeline.cc
+++ b/test/cctest/compiler/test-pipeline.cc
@@ -23,11 +23,7 @@
*v8::Handle<v8::Function>::Cast(CompileRun(source)));
CompilationInfoWithZone info(function);
- CHECK(Parser::Parse(&info));
- CHECK(Rewriter::Rewrite(&info));
- CHECK(Scope::Analyze(&info));
- CHECK(AstNumbering::Renumber(info.function(), info.zone()));
- CHECK_NE(NULL, info.scope());
+ CHECK(Compiler::ParseAndAnalyze(&info));
Pipeline pipeline(&info);
#if V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-scheduler.cc b/test/cctest/compiler/test-scheduler.cc
index e866876..f6019f7 100644
--- a/test/cctest/compiler/test-scheduler.cc
+++ b/test/cctest/compiler/test-scheduler.cc
@@ -1802,6 +1802,45 @@
}
+TEST(LoopedFloatingDiamond) {
+ HandleAndZoneScope scope;
+ Graph graph(scope.main_zone());
+ CommonOperatorBuilder common(scope.main_zone());
+ SimplifiedOperatorBuilder simplified(scope.main_zone());
+ MachineOperatorBuilder machine;
+
+ Node* start = graph.NewNode(common.Start(2));
+ graph.SetStart(start);
+
+ Node* p0 = graph.NewNode(common.Parameter(0), start);
+
+ Node* c = graph.NewNode(common.Int32Constant(7));
+ Node* loop = graph.NewNode(common.Loop(2), start, start);
+ Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
+ Node* add = graph.NewNode(machine.IntAdd(), ind, c);
+
+ Node* br = graph.NewNode(common.Branch(), add, loop);
+ Node* t = graph.NewNode(common.IfTrue(), br);
+ Node* f = graph.NewNode(common.IfFalse(), br);
+
+ Node* br1 = graph.NewNode(common.Branch(), p0, graph.start());
+ Node* t1 = graph.NewNode(common.IfTrue(), br1);
+ Node* f1 = graph.NewNode(common.IfFalse(), br1);
+ Node* m1 = graph.NewNode(common.Merge(2), t1, f1);
+ Node* phi1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), add, p0, m1);
+
+ loop->ReplaceInput(1, t); // close loop.
+ ind->ReplaceInput(1, phi1); // close induction variable.
+
+ Node* ret = graph.NewNode(common.Return(), ind, start, f);
+ Node* end = graph.NewNode(common.End(), ret, f);
+
+ graph.SetEnd(end);
+
+ ComputeAndVerifySchedule(20, &graph);
+}
+
+
TEST(PhisPushedDownToDifferentBranches) {
HandleAndZoneScope scope;
Graph graph(scope.main_zone());
diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc
index 47acbe1..e77f57a 100644
--- a/test/cctest/compiler/test-simplified-lowering.cc
+++ b/test/cctest/compiler/test-simplified-lowering.cc
@@ -1720,11 +1720,11 @@
TestingGraph t(Type::Signed32());
Node* k = t.jsgraph.Constant(constants[i]);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), div);
- t.Return(trunc);
+ Node* use = t.Use(div, kMachInt32);
+ t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kInt32Div, div->opcode());
+ CHECK_EQ(IrOpcode::kInt32Div, use->InputAt(0)->opcode());
}
}
@@ -1761,11 +1761,11 @@
TestingGraph t(Type::Unsigned32());
Node* k = t.jsgraph.Constant(constants[i]);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), div);
- t.Return(trunc);
+ Node* use = t.Use(div, kMachUint32);
+ t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kUint32Div, div->opcode());
+ CHECK_EQ(IrOpcode::kUint32Div, use->InputAt(0)->opcode());
}
}
@@ -1795,28 +1795,39 @@
TEST(NumberDivide_BadConstants) {
- int32_t constants[] = {-1, 0};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
+ {
TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(constants[i]);
+ Node* k = t.jsgraph.Constant(-1);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), div);
- t.Return(trunc);
+ Node* use = t.Use(div, kMachInt32);
+ t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kFloat64Div, div->opcode());
+ CHECK_EQ(IrOpcode::kInt32Sub, use->InputAt(0)->opcode());
+ }
+
+ {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(0);
+ Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
+ Node* use = t.Use(div, kMachInt32);
+ t.Return(use);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
+ CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
}
{
TestingGraph t(Type::Unsigned32());
Node* k = t.jsgraph.Constant(0);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), div);
- t.Return(trunc);
+ Node* use = t.Use(div, kMachUint32);
+ t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kFloat64Div, div->opcode());
+ CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
+ CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
}
}
@@ -1828,11 +1839,11 @@
TestingGraph t(Type::Signed32());
Node* k = t.jsgraph.Constant(constants[i]);
Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mod);
- t.Return(trunc);
+ Node* use = t.Use(mod, kMachInt32);
+ t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kInt32Mod, mod->opcode());
+ CHECK_EQ(IrOpcode::kInt32Mod, use->InputAt(0)->opcode());
}
}
@@ -1870,10 +1881,10 @@
Node* k = t.jsgraph.Constant(constants[i]);
Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
- t.Return(trunc);
+ Node* ret = t.Return(trunc);
t.Lower();
- CHECK_EQ(IrOpcode::kUint32Mod, mod->opcode());
+ CHECK_EQ(IrOpcode::kUint32Mod, ret->InputAt(0)->opcode());
}
}
@@ -1916,45 +1927,3 @@
CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode()); // Pesky -0 behavior.
}
}
-
-
-TEST(NumberModulus_Uint32) {
- double constants[] = {1, 3, 100, 1000, 100998348};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- TestingGraph t(Type::Unsigned32());
- Node* k = t.jsgraph.Constant(constants[i]);
- Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- t.Return(mod);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kUint32Mod, mod->opcode());
- }
-}
-
-
-TEST(NumberModulus_BadConstants) {
- int32_t constants[] = {-1, 0};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(constants[i]);
- Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mod);
- t.Return(trunc);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode());
- }
-
- {
- TestingGraph t(Type::Unsigned32());
- Node* k = t.jsgraph.Constant(0);
- Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
- t.Return(trunc);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode());
- }
-}
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 2e8d4ae..717cfb3 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -17627,6 +17627,7 @@
static void StackTraceForUncaughtExceptionListener(
v8::Handle<v8::Message> message,
v8::Handle<Value>) {
+ report_count++;
v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK_EQ(2, stack_trace->GetFrameCount());
checkStackFrame("origin", "foo", 2, 3, false, false,
@@ -17657,6 +17658,38 @@
Function::Cast(*trouble)->Call(global, 0, NULL);
v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
v8::V8::RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
+ CHECK_EQ(1, report_count);
+}
+
+
+TEST(GetStackTraceForUncaughtExceptionFromSimpleStackTrace) {
+ report_count = 0;
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ // Create an Error object first.
+ CompileRunWithOrigin(
+ "function foo() {\n"
+ "e=new Error('err');\n"
+ "};\n"
+ "function bar() {\n"
+ " foo();\n"
+ "};\n"
+ "var e;",
+ "origin");
+ v8::Local<v8::Object> global = env->Global();
+ Local<Value> trouble = global->Get(v8_str("bar"));
+ CHECK(trouble->IsFunction());
+ Function::Cast(*trouble)->Call(global, 0, NULL);
+
+ // Enable capturing detailed stack trace late, and throw the exception.
+ // The detailed stack trace should be extracted from the simple stack.
+ v8::V8::AddMessageListener(StackTraceForUncaughtExceptionListener);
+ v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ CompileRunWithOrigin("throw e", "origin");
+ v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+ v8::V8::RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
+ CHECK_EQ(1, report_count);
}
diff --git a/test/cctest/test-feedback-vector.cc b/test/cctest/test-feedback-vector.cc
index 79c6ea2..28a15f2 100644
--- a/test/cctest/test-feedback-vector.cc
+++ b/test/cctest/test-feedback-vector.cc
@@ -86,12 +86,13 @@
// Set metadata.
for (int i = 0; i < 30; i++) {
Code::Kind kind;
- if (i % 3 == 0)
+ if (i % 3 == 0) {
kind = Code::CALL_IC;
- else if (i % 3 == 1)
+ } else if (i % 3 == 1) {
kind = Code::LOAD_IC;
- else
+ } else {
kind = Code::KEYED_LOAD_IC;
+ }
vector->SetKind(FeedbackVectorICSlot(i), kind);
}
@@ -197,4 +198,45 @@
CHECK(
feedback_vector->Get(FeedbackVectorICSlot(ic_slot))->IsAllocationSite());
}
+
+
+TEST(VectorCallICStates) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ // Make sure function f has a call that uses a type feedback slot.
+ CompileRun(
+ "function foo() { return 17; }"
+ "function f(a) { a(); } f(foo);");
+ Handle<JSFunction> f = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
+ // There should be one IC.
+ Handle<TypeFeedbackVector> feedback_vector =
+ Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+ FeedbackVectorICSlot slot(FLAG_vector_ics ? 1 : 0);
+ CallICNexus nexus(feedback_vector, slot);
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ // CallIC doesn't return map feedback.
+ CHECK_EQ(NULL, nexus.FindFirstMap());
+
+ CompileRun("f(function() { return 16; })");
+ CHECK_EQ(GENERIC, nexus.StateFromFeedback());
+
+ // After a collection, state should be reset to UNINITIALIZED.
+ heap->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK_EQ(UNINITIALIZED, nexus.StateFromFeedback());
+
+ // Array is special. It will remain monomorphic across gcs and it contains an
+ // AllocationSite.
+ CompileRun("f(Array)");
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot))->IsAllocationSite());
+
+ heap->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+}
}
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index 4028167..e45e9eb 100644
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -3273,9 +3273,7 @@
i::Parser parser(&info, &parse_info);
parser.set_allow_harmony_scoping(true);
CHECK(parser.Parse());
- CHECK(i::Rewriter::Rewrite(&info));
- CHECK(i::Scope::Analyze(&info));
- CHECK(i::AstNumbering::Renumber(info.function(), info.zone()));
+ CHECK(i::Compiler::Analyze(&info));
CHECK(info.function() != NULL);
i::Scope* scope = info.function()->scope();
@@ -4041,9 +4039,6 @@
TEST(ClassMultipleConstructorErrors) {
- // We currently do not allow any duplicate properties in class bodies. This
- // test ensures that when we change that we still throw on duplicate
- // constructors.
const char* context_data[][2] = {{"class C {", "}"},
{"(class {", "});"},
{NULL, NULL}};
@@ -4061,9 +4056,7 @@
}
-// TODO(arv): We should allow duplicate property names.
-// https://code.google.com/p/v8/issues/detail?id=3570
-DISABLED_TEST(ClassMultiplePropertyNamesNoErrors) {
+TEST(ClassMultiplePropertyNamesNoErrors) {
const char* context_data[][2] = {{"class C {", "}"},
{"(class {", "});"},
{NULL, NULL}};
@@ -4072,6 +4065,8 @@
"constructor() {}; static constructor() {}",
"m() {}; static m() {}",
"m() {}; m() {}",
+ "static m() {}; static m() {}",
+ "get m() {}; set m(_) {}; get m() {}; set m(_) {};",
NULL};
static const ParserFlag always_flags[] = {
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index 8c5ad0e..05a12f5 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -113,6 +113,7 @@
unsigned char data = buffer[index];
CHECK_EQ(i % 4, TwoBits::decode(data, i));
}
+ buffer.Dispose();
}
diff --git a/test/mjsunit/asm/do-while.js b/test/mjsunit/asm/do-while.js
new file mode 100644
index 0000000..214be64
--- /dev/null
+++ b/test/mjsunit/asm/do-while.js
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib, foreign, buffer) {
+ "use asm";
+ function f(i) {
+ var j;
+ i = i|0;
+ do {
+ if (i > 0) {
+ j = i != 0;
+ i = (i - 1) | 0;
+ } else {
+ j = 0;
+ }
+ } while (j);
+ return i;
+ }
+ return {f:f};
+}
+
+var m = Module(this, {}, new ArrayBuffer(64*1024));
+
+assertEquals(-1, m.f("-1"));
+assertEquals(0, m.f(-Infinity));
+assertEquals(0, m.f(undefined));
+assertEquals(0, m.f(0));
+assertEquals(0, m.f(1));
+assertEquals(0, m.f(100));
diff --git a/test/mjsunit/boolean.js b/test/mjsunit/boolean.js
index d955855..9b9edd2 100644
--- a/test/mjsunit/boolean.js
+++ b/test/mjsunit/boolean.js
@@ -72,3 +72,10 @@
assertEquals('foo', o.p || (o.p == 0));
assertEquals('foo', o.p || (o.p == null));
assertEquals('foo', o.p || (o.p == o.p));
+
+// JSToBoolean(x:string)
+function f(x) { return !!("" + x); }
+assertEquals(false, f(""));
+assertEquals(true, f("narf"));
+assertEquals(true, f(12345678));
+assertEquals(true, f(undefined));
diff --git a/test/mjsunit/harmony/classes.js b/test/mjsunit/harmony/classes.js
index a83d483..9302b29 100644
--- a/test/mjsunit/harmony/classes.js
+++ b/test/mjsunit/harmony/classes.js
@@ -153,6 +153,292 @@
})();
+
+(function TestToString() {
+ class C {}
+ assertEquals('class C {}', C.toString());
+
+ class D { constructor() { 42; } }
+ assertEquals('class D { constructor() { 42; } }', D.toString());
+
+ class E { x() { 42; } }
+ assertEquals('class E { x() { 42; } }', E.toString());
+})();
+
+
+function assertMethodDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertTrue(descr.enumerable);
+ assertTrue(descr.writable);
+ assertEquals('function', typeof descr.value);
+ assertFalse('prototype' in descr.value);
+}
+
+
+function assertGetterDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertTrue(descr.enumerable);
+ assertEquals('function', typeof descr.get);
+ assertEquals(undefined, descr.set);
+}
+
+
+function assertSetterDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertTrue(descr.enumerable);
+ assertEquals(undefined, descr.get);
+ assertEquals('function', typeof descr.set);
+}
+
+
+function assertAccessorDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertTrue(descr.enumerable);
+ assertEquals('function', typeof descr.get);
+ assertEquals('function', typeof descr.set);
+}
+
+
+(function TestMethods() {
+ class C {
+ method() { return 1; }
+ static staticMethod() { return 2; }
+ method2() { return 3; }
+ static staticMethod2() { return 4; }
+ }
+
+ assertMethodDescriptor(C.prototype, 'method');
+ assertMethodDescriptor(C.prototype, 'method2');
+ assertMethodDescriptor(C, 'staticMethod');
+ assertMethodDescriptor(C, 'staticMethod2');
+
+ assertEquals(1, new C().method());
+ assertEquals(2, C.staticMethod());
+ assertEquals(3, new C().method2());
+ assertEquals(4, C.staticMethod2());
+})();
+
+
+(function TestGetters() {
+ class C {
+ get x() { return 1; }
+ static get staticX() { return 2; }
+ get y() { return 3; }
+ static get staticY() { return 4; }
+ }
+
+ assertGetterDescriptor(C.prototype, 'x');
+ assertGetterDescriptor(C.prototype, 'y');
+ assertGetterDescriptor(C, 'staticX');
+ assertGetterDescriptor(C, 'staticY');
+
+ assertEquals(1, new C().x);
+ assertEquals(2, C.staticX);
+ assertEquals(3, new C().y);
+ assertEquals(4, C.staticY);
+})();
+
+
+
+(function TestSetters() {
+ var x, staticX, y, staticY;
+ class C {
+ set x(v) { x = v; }
+ static set staticX(v) { staticX = v; }
+ set y(v) { y = v; }
+ static set staticY(v) { staticY = v; }
+ }
+
+ assertSetterDescriptor(C.prototype, 'x');
+ assertSetterDescriptor(C.prototype, 'y');
+ assertSetterDescriptor(C, 'staticX');
+ assertSetterDescriptor(C, 'staticY');
+
+ assertEquals(1, new C().x = 1);
+ assertEquals(1, x);
+ assertEquals(2, C.staticX = 2);
+ assertEquals(2, staticX);
+ assertEquals(3, new C().y = 3);
+ assertEquals(3, y);
+ assertEquals(4, C.staticY = 4);
+ assertEquals(4, staticY);
+})();
+
+
+(function TestSideEffectsInPropertyDefine() {
+ function B() {}
+ B.prototype = {
+ constructor: B,
+ set m(v) {
+ throw Error();
+ }
+ };
+
+ class C extends B {
+ m() { return 1; }
+ }
+
+ assertEquals(1, new C().m());
+})();
+
+
+(function TestAccessors() {
+ class C {
+ constructor(x) {
+ this._x = x;
+ }
+
+ get x() { return this._x; }
+ set x(v) { this._x = v; }
+
+ static get staticX() { return this._x; }
+ static set staticX(v) { this._x = v; }
+ }
+
+ assertAccessorDescriptor(C.prototype, 'x');
+ assertAccessorDescriptor(C, 'staticX');
+
+ var c = new C(1);
+ c._x = 1;
+ assertEquals(1, c.x);
+ c.x = 2;
+ assertEquals(2, c._x);
+
+ C._x = 3;
+ assertEquals(3, C.staticX);
+ C._x = 4;
+ assertEquals(4, C.staticX );
+})();
+
+
+(function TestProto() {
+ class C {
+ __proto__() { return 1; }
+ }
+ assertMethodDescriptor(C.prototype, '__proto__');
+ assertEquals(1, new C().__proto__());
+})();
+
+
+(function TestProtoStatic() {
+ class C {
+ static __proto__() { return 1; }
+ }
+ assertMethodDescriptor(C, '__proto__');
+ assertEquals(1, C.__proto__());
+})();
+
+
+(function TestProtoAccessor() {
+ class C {
+ get __proto__() { return this._p; }
+ set __proto__(v) { this._p = v; }
+ }
+ assertAccessorDescriptor(C.prototype, '__proto__');
+ var c = new C();
+ c._p = 1;
+ assertEquals(1, c.__proto__);
+ c.__proto__ = 2;
+ assertEquals(2, c.__proto__);
+})();
+
+
+(function TestStaticProtoAccessor() {
+ class C {
+ static get __proto__() { return this._p; }
+ static set __proto__(v) { this._p = v; }
+ }
+ assertAccessorDescriptor(C, '__proto__');
+ C._p = 1;
+ assertEquals(1, C.__proto__);
+ C.__proto__ = 2;
+ assertEquals(2, C.__proto__);
+})();
+
+
+(function TestSettersOnProto() {
+ function Base() {}
+ Base.prototype = {
+ set constructor(_) {
+ assertUnreachable();
+ },
+ set m(_) {
+ assertUnreachable();
+ }
+ };
+ Object.defineProperty(Base, 'staticM', {
+ set: function() {
+ assertUnreachable();
+ }
+ });
+
+ class C extends Base {
+ m() {
+ return 1;
+ }
+ static staticM() {
+ return 2;
+ }
+ }
+
+ assertEquals(1, new C().m());
+ assertEquals(2, C.staticM());
+})();
+
+
+(function TestConstructableButNoPrototype() {
+ var Base = function() {}.bind();
+ assertThrows(function() {
+ class C extends Base {}
+ }, TypeError);
+})();
+
+
+(function TestPrototypeGetter() {
+ var calls = 0;
+ var Base = function() {}.bind();
+ Object.defineProperty(Base, 'prototype', {
+ get: function() {
+ calls++;
+ return null;
+ },
+ configurable: true
+ });
+ class C extends Base {}
+ assertEquals(1, calls);
+
+ calls = 0;
+ Object.defineProperty(Base, 'prototype', {
+ get: function() {
+ calls++;
+ return 42;
+ },
+ configurable: true
+ });
+ assertThrows(function() {
+ class C extends Base {}
+ }, TypeError);
+ assertEquals(1, calls);
+})();
+
+
+(function TestPrototypeSetter() {
+ var Base = function() {}.bind();
+ Object.defineProperty(Base, 'prototype', {
+ set: function() {
+ assertUnreachable();
+ }
+ });
+ assertThrows(function() {
+ class C extends Base {}
+ }, TypeError);
+})();
+
+
/* TODO(arv): Implement
(function TestNameBindingInConstructor() {
class C {
@@ -165,15 +451,3 @@
new C();
})();
*/
-
-
-(function TestToString() {
- class C {}
- assertEquals('class C {}', C.toString());
-
- class D { constructor() { 42; } }
- assertEquals('class D { constructor() { 42; } }', D.toString());
-
- class E { x() { 42; } }
- assertEquals('class E { x() { 42; } }', E.toString());
-})();
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index bf297fe..15f89d8 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -309,6 +309,10 @@
'unicodelctest-no-optimization': [PASS, SLOW],
'unicodelctest': [PASS, SLOW],
'unicode-test': [PASS, SLOW],
+
+ # TODO(bmeurer, Rodolph.Perfetta@arm.com): Fails with turbo fan.
+ 'numops-fuzz-part1': [PASS, NO_VARIANTS],
+ 'numops-fuzz-part2': [PASS, NO_VARIANTS],
}], # 'arch == arm64'
['arch == arm64 and mode == debug and simulator_run == True', {
diff --git a/test/mjsunit/regress/regress-crbug-409614.js b/test/mjsunit/regress/regress-crbug-409614.js
new file mode 100644
index 0000000..7b27404
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-409614.js
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug;
+var exception = null;
+var error_count = 0;
+
+function f() {
+ return 0; // Break
+}
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ if (exec_state.frame(0).sourceLineText().indexOf("Break") <0) {
+ error_count++;
+ }
+ exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ f(); // We should not break in this call of f().
+ } catch (e) {
+ print(e + e.stack);
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+debugger; // Break
+f();
+
+Debug.setListener(null); // Break
+
+assertNull(exception);
+assertEquals(0, error_count);
diff --git a/test/mjsunit/regress/regress-eval-cache.js b/test/mjsunit/regress/regress-eval-cache.js
new file mode 100644
index 0000000..8f8dc18
--- /dev/null
+++ b/test/mjsunit/regress/regress-eval-cache.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function f() {
+ try {
+ throw 1;
+ } catch (e) {
+ var a = 0;
+ var b = 0;
+ var c = 0;
+ var x = 1;
+ var result = eval('eval("x")').toString();
+ assertEquals("1", result);
+ }
+ var x = 2;
+ var result = eval('eval("x")').toString();
+ assertEquals("2", result);
+})();
diff --git a/test/unittests/compiler/js-typed-lowering-unittest.cc b/test/unittests/compiler/js-typed-lowering-unittest.cc
index 3da4bc7..af32d94 100644
--- a/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -69,6 +69,46 @@
// -----------------------------------------------------------------------------
+// JSToBoolean
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
+ Node* input = Parameter(Type::String());
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsNumberEqual(
+ IsLoadField(AccessBuilder::ForStringLength(), input,
+ graph()->start(), graph()->start()),
+ IsNumberConstant(0))));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumberAndBoolean) {
+ Node* p0 = Parameter(Type::OrderedNumber(), 0);
+ Node* p1 = Parameter(Type::Boolean(), 1);
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(),
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p1, control),
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsPhi(kMachAnyTagged,
+ IsBooleanNot(IsNumberEqual(p0, IsNumberConstant(0))), p1, control));
+}
+
+
+// -----------------------------------------------------------------------------
// JSLoadProperty
@@ -102,8 +142,8 @@
r.replacement(),
IsLoadElement(AccessBuilder::ForTypedArrayElement(type, true),
IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- key, IsInt32Constant(static_cast<int>(kLength)), effect,
- control));
+ key, IsNumberConstant(static_cast<double>(kLength)),
+ effect));
}
}
@@ -142,8 +182,8 @@
IsStoreElement(
AccessBuilder::ForTypedArrayElement(type, true),
IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- key, IsInt32Constant(static_cast<int>(kLength)), value,
- effect, control));
+ key, IsNumberConstant(static_cast<double>(kLength)),
+ value, effect, control));
}
}
}
diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 461c2be..fff6f96 100644
--- a/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -482,6 +482,37 @@
// -----------------------------------------------------------------------------
+// Word32And
+
+
+TEST_F(MachineOperatorReducerTest, Word32AndWithWord32AndWithConstant) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FOREACH(int32_t, k, kInt32Values) {
+ TRACED_FOREACH(int32_t, l, kInt32Values) {
+ if (k == 0 || k == -1 || l == 0 || l == -1) continue;
+
+ // (x & K) & L => x & (K & L)
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Word32And(), p0, Int32Constant(k)),
+ Int32Constant(l)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
+
+ // (K & x) & L => x & (K & L)
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Word32And(), Int32Constant(k), p0),
+ Int32Constant(l)));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
// Word32Xor
diff --git a/test/unittests/compiler/machine-operator-unittest.cc b/test/unittests/compiler/machine-operator-unittest.cc
index 41ac5c1..74e5517 100644
--- a/test/unittests/compiler/machine-operator-unittest.cc
+++ b/test/unittests/compiler/machine-operator-unittest.cc
@@ -158,6 +158,7 @@
const Operator* (MachineOperatorBuilder::*constructor)();
IrOpcode::Value opcode;
int value_input_count;
+ int control_input_count;
int value_output_count;
};
@@ -168,43 +169,41 @@
const PureOperator kPureOperators[] = {
-#define PURE(Name, input_count, output_count) \
- { \
- &MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \
- output_count \
+#define PURE(Name, value_input_count, control_input_count, value_output_count) \
+ { \
+ &MachineOperatorBuilder::Name, IrOpcode::k##Name, value_input_count, \
+ control_input_count, value_output_count \
}
- PURE(Word32And, 2, 1), PURE(Word32Or, 2, 1),
- PURE(Word32Xor, 2, 1), PURE(Word32Shl, 2, 1),
- PURE(Word32Shr, 2, 1), PURE(Word32Sar, 2, 1),
- PURE(Word32Ror, 2, 1), PURE(Word32Equal, 2, 1),
- PURE(Word64And, 2, 1), PURE(Word64Or, 2, 1),
- PURE(Word64Xor, 2, 1), PURE(Word64Shl, 2, 1),
- PURE(Word64Shr, 2, 1), PURE(Word64Sar, 2, 1),
- PURE(Word64Ror, 2, 1), PURE(Word64Equal, 2, 1),
- PURE(Int32Add, 2, 1), PURE(Int32AddWithOverflow, 2, 2),
- PURE(Int32Sub, 2, 1), PURE(Int32SubWithOverflow, 2, 2),
- PURE(Int32Mul, 2, 1), PURE(Int32MulHigh, 2, 1),
- PURE(Int32Div, 2, 1), PURE(Uint32Div, 2, 1),
- PURE(Int32Mod, 2, 1), PURE(Uint32Mod, 2, 1),
- PURE(Int32LessThan, 2, 1), PURE(Int32LessThanOrEqual, 2, 1),
- PURE(Uint32LessThan, 2, 1), PURE(Uint32LessThanOrEqual, 2, 1),
- PURE(Int64Add, 2, 1), PURE(Int64Sub, 2, 1),
- PURE(Int64Mul, 2, 1), PURE(Int64Div, 2, 1),
- PURE(Uint64Div, 2, 1), PURE(Int64Mod, 2, 1),
- PURE(Uint64Mod, 2, 1), PURE(Int64LessThan, 2, 1),
- PURE(Int64LessThanOrEqual, 2, 1), PURE(Uint64LessThan, 2, 1),
- PURE(ChangeFloat32ToFloat64, 1, 1), PURE(ChangeFloat64ToInt32, 1, 1),
- PURE(ChangeFloat64ToUint32, 1, 1), PURE(ChangeInt32ToInt64, 1, 1),
- PURE(ChangeUint32ToFloat64, 1, 1), PURE(ChangeUint32ToUint64, 1, 1),
- PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
- PURE(TruncateInt64ToInt32, 1, 1), PURE(Float64Add, 2, 1),
- PURE(Float64Sub, 2, 1), PURE(Float64Mul, 2, 1),
- PURE(Float64Div, 2, 1), PURE(Float64Mod, 2, 1),
- PURE(Float64Sqrt, 1, 1), PURE(Float64Equal, 2, 1),
- PURE(Float64LessThan, 2, 1), PURE(Float64LessThanOrEqual, 2, 1),
- PURE(LoadStackPointer, 0, 1), PURE(Float64Floor, 1, 1),
- PURE(Float64Ceil, 1, 1), PURE(Float64RoundTruncate, 1, 1),
- PURE(Float64RoundTiesAway, 1, 1),
+ PURE(Word32And, 2, 0, 1), PURE(Word32Or, 2, 0, 1), PURE(Word32Xor, 2, 0, 1),
+ PURE(Word32Shl, 2, 0, 1), PURE(Word32Shr, 2, 0, 1),
+ PURE(Word32Sar, 2, 0, 1), PURE(Word32Ror, 2, 0, 1),
+ PURE(Word32Equal, 2, 0, 1), PURE(Word64And, 2, 0, 1),
+ PURE(Word64Or, 2, 0, 1), PURE(Word64Xor, 2, 0, 1), PURE(Word64Shl, 2, 0, 1),
+ PURE(Word64Shr, 2, 0, 1), PURE(Word64Sar, 2, 0, 1),
+ PURE(Word64Ror, 2, 0, 1), PURE(Word64Equal, 2, 0, 1),
+ PURE(Int32Add, 2, 0, 1), PURE(Int32AddWithOverflow, 2, 0, 2),
+ PURE(Int32Sub, 2, 0, 1), PURE(Int32SubWithOverflow, 2, 0, 2),
+ PURE(Int32Mul, 2, 0, 1), PURE(Int32MulHigh, 2, 0, 1),
+ PURE(Int32Div, 2, 1, 1), PURE(Uint32Div, 2, 1, 1), PURE(Int32Mod, 2, 1, 1),
+ PURE(Uint32Mod, 2, 1, 1), PURE(Int32LessThan, 2, 0, 1),
+ PURE(Int32LessThanOrEqual, 2, 0, 1), PURE(Uint32LessThan, 2, 0, 1),
+ PURE(Uint32LessThanOrEqual, 2, 0, 1), PURE(Int64Add, 2, 0, 1),
+ PURE(Int64Sub, 2, 0, 1), PURE(Int64Mul, 2, 0, 1), PURE(Int64Div, 2, 0, 1),
+ PURE(Uint64Div, 2, 0, 1), PURE(Int64Mod, 2, 0, 1), PURE(Uint64Mod, 2, 0, 1),
+ PURE(Int64LessThan, 2, 0, 1), PURE(Int64LessThanOrEqual, 2, 0, 1),
+ PURE(Uint64LessThan, 2, 0, 1), PURE(ChangeFloat32ToFloat64, 1, 0, 1),
+ PURE(ChangeFloat64ToInt32, 1, 0, 1), PURE(ChangeFloat64ToUint32, 1, 0, 1),
+ PURE(ChangeInt32ToInt64, 1, 0, 1), PURE(ChangeUint32ToFloat64, 1, 0, 1),
+ PURE(ChangeUint32ToUint64, 1, 0, 1),
+ PURE(TruncateFloat64ToFloat32, 1, 0, 1),
+ PURE(TruncateFloat64ToInt32, 1, 0, 1), PURE(TruncateInt64ToInt32, 1, 0, 1),
+ PURE(Float64Add, 2, 0, 1), PURE(Float64Sub, 2, 0, 1),
+ PURE(Float64Mul, 2, 0, 1), PURE(Float64Div, 2, 0, 1),
+ PURE(Float64Mod, 2, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
+ PURE(Float64Equal, 2, 0, 1), PURE(Float64LessThan, 2, 0, 1),
+ PURE(Float64LessThanOrEqual, 2, 0, 1), PURE(LoadStackPointer, 0, 0, 1),
+ PURE(Float64Floor, 1, 0, 1), PURE(Float64Ceil, 1, 0, 1),
+ PURE(Float64RoundTruncate, 1, 0, 1), PURE(Float64RoundTiesAway, 1, 0, 1)
#undef PURE
};
@@ -229,8 +228,10 @@
EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
- EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
- EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(pop.control_input_count,
+ OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(pop.value_input_count + pop.control_input_count,
+ OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(pop.value_output_count,
OperatorProperties::GetValueOutputCount(op));
diff --git a/test/unittests/compiler/node-test-utils.cc b/test/unittests/compiler/node-test-utils.cc
index ebeeee5..6f00c37 100644
--- a/test/unittests/compiler/node-test-utils.cc
+++ b/test/unittests/compiler/node-test-utils.cc
@@ -355,11 +355,13 @@
public:
IsLoadFieldMatcher(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& effect_matcher)
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
: NodeMatcher(IrOpcode::kLoadField),
access_matcher_(access_matcher),
base_matcher_(base_matcher),
- effect_matcher_(effect_matcher) {}
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
@@ -367,8 +369,10 @@
access_matcher_.DescribeTo(os);
*os << "), base (";
base_matcher_.DescribeTo(os);
- *os << ") and effect (";
+ *os << "), effect (";
effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
*os << ")";
}
@@ -380,13 +384,16 @@
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
base_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener));
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
}
private:
const Matcher<FieldAccess> access_matcher_;
const Matcher<Node*> base_matcher_;
const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
};
@@ -396,15 +403,13 @@
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& length_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher)
+ const Matcher<Node*>& effect_matcher)
: NodeMatcher(IrOpcode::kLoadElement),
access_matcher_(access_matcher),
base_matcher_(base_matcher),
index_matcher_(index_matcher),
length_matcher_(length_matcher),
- effect_matcher_(effect_matcher),
- control_matcher_(control_matcher) {}
+ effect_matcher_(effect_matcher) {}
virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
@@ -416,10 +421,8 @@
index_matcher_.DescribeTo(os);
*os << "), length (";
length_matcher_.DescribeTo(os);
- *os << "), effect (";
+ *os << ") and effect (";
effect_matcher_.DescribeTo(os);
- *os << ") and control (";
- control_matcher_.DescribeTo(os);
*os << ")";
}
@@ -435,9 +438,7 @@
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
"length", length_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ effect_matcher_, listener));
}
private:
@@ -446,7 +447,6 @@
const Matcher<Node*> index_matcher_;
const Matcher<Node*> length_matcher_;
const Matcher<Node*> effect_matcher_;
- const Matcher<Node*> control_matcher_;
};
@@ -795,9 +795,10 @@
Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& effect_matcher) {
- return MakeMatcher(
- new IsLoadFieldMatcher(access_matcher, base_matcher, effect_matcher));
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsLoadFieldMatcher(access_matcher, base_matcher,
+ effect_matcher, control_matcher));
}
@@ -805,11 +806,10 @@
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& length_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher) {
+ const Matcher<Node*>& effect_matcher) {
return MakeMatcher(new IsLoadElementMatcher(access_matcher, base_matcher,
index_matcher, length_matcher,
- effect_matcher, control_matcher));
+ effect_matcher));
}
@@ -854,6 +854,7 @@
return MakeMatcher( \
new IsBinopMatcher(IrOpcode::k##Name, lhs_matcher, rhs_matcher)); \
}
+IS_BINOP_MATCHER(NumberEqual)
IS_BINOP_MATCHER(NumberLessThan)
IS_BINOP_MATCHER(NumberSubtract)
IS_BINOP_MATCHER(Word32And)
@@ -881,6 +882,7 @@
Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) { \
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
+IS_UNOP_MATCHER(BooleanNot)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToUint32)
IS_UNOP_MATCHER(ChangeInt32ToFloat64)
diff --git a/test/unittests/compiler/node-test-utils.h b/test/unittests/compiler/node-test-utils.h
index c5c61df..f277a10 100644
--- a/test/unittests/compiler/node-test-utils.h
+++ b/test/unittests/compiler/node-test-utils.h
@@ -62,19 +62,22 @@
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberEqual(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& effect_matcher);
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& length_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher);
+ const Matcher<Node*>& effect_matcher);
Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
diff --git a/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index f96f03c..465ee84 100644
--- a/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -490,46 +490,45 @@
access_nocheck.bounds_check = kNoBoundsCheck;
Node* const base = Parameter(0);
Node* const effect = graph()->start();
- Node* const control = graph()->start();
{
Node* const key = NumberConstant(-42.0);
Node* const length = NumberConstant(100.0);
Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect, control));
+ base, key, length, effect));
ASSERT_FALSE(r.Changed());
}
{
Node* const key = NumberConstant(-0.0);
Node* const length = NumberConstant(1.0);
Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect, control));
+ base, key, length, effect));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsLoadElement(access_nocheck, base, key,
- length, effect, control));
+ EXPECT_THAT(r.replacement(),
+ IsLoadElement(access_nocheck, base, key, length, effect));
}
{
- Node* const key = Int32Constant(0);
- Node* const length = Int32Constant(1);
+ Node* const key = NumberConstant(0);
+ Node* const length = NumberConstant(1);
Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect, control));
+ base, key, length, effect));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsLoadElement(access_nocheck, base, key,
- length, effect, control));
+ EXPECT_THAT(r.replacement(),
+ IsLoadElement(access_nocheck, base, key, length, effect));
}
{
Node* const key = NumberConstant(42.2);
- Node* const length = Int32Constant(128);
+ Node* const length = NumberConstant(128);
Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect, control));
+ base, key, length, effect));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsLoadElement(access_nocheck, base, key,
- length, effect, control));
+ EXPECT_THAT(r.replacement(),
+ IsLoadElement(access_nocheck, base, key, length, effect));
}
{
Node* const key = NumberConstant(39.2);
Node* const length = NumberConstant(32.0);
Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect, control));
+ base, key, length, effect));
ASSERT_FALSE(r.Changed());
}
}
@@ -558,7 +557,7 @@
}
{
Node* const key = NumberConstant(-0.0);
- Node* const length = Int32Constant(999);
+ Node* const length = NumberConstant(999);
Reduction r =
Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
length, value, effect, control));
@@ -568,8 +567,8 @@
control));
}
{
- Node* const key = Int32Constant(0);
- Node* const length = Int32Constant(1);
+ Node* const key = NumberConstant(0);
+ Node* const length = NumberConstant(1);
Reduction r =
Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
length, value, effect, control));
@@ -580,7 +579,7 @@
}
{
Node* const key = NumberConstant(42.2);
- Node* const length = Int32Constant(128);
+ Node* const length = NumberConstant(128);
Reduction r =
Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
length, value, effect, control));
diff --git a/test/unittests/compiler/simplified-operator-unittest.cc b/test/unittests/compiler/simplified-operator-unittest.cc
index 8c5a918..dcabc1a 100644
--- a/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/test/unittests/compiler/simplified-operator-unittest.cc
@@ -177,8 +177,8 @@
EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
- EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
- EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+ EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 72437d8..d66117c 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -393,6 +393,8 @@
'../../src/compiler/access-builder.h',
'../../src/compiler/ast-graph-builder.cc',
'../../src/compiler/ast-graph-builder.h',
+ '../../src/compiler/ast-loop-assignment-analyzer.cc',
+ '../../src/compiler/ast-loop-assignment-analyzer.h',
'../../src/compiler/basic-block-instrumentor.cc',
'../../src/compiler/basic-block-instrumentor.h',
'../../src/compiler/change-lowering.cc',
@@ -554,7 +556,6 @@
'../../src/factory.h',
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
- '../../src/feedback-slots.h',
'../../src/field-index.h',
'../../src/field-index-inl.h',
'../../src/fixed-dtoa.cc',
@@ -832,7 +833,6 @@
'../../src/unicode-decoder.cc',
'../../src/unicode-decoder.h',
'../../src/unique.h',
- '../../src/uri.h',
'../../src/utils-inl.h',
'../../src/utils.cc',
'../../src/utils.h',
diff --git a/tools/push-to-trunk/git_recipes.py b/tools/push-to-trunk/git_recipes.py
index 7cfbeca..d57dc84 100644
--- a/tools/push-to-trunk/git_recipes.py
+++ b/tools/push-to-trunk/git_recipes.py
@@ -105,9 +105,10 @@
def GitBranch(self, **kwargs):
return self.Git("branch", **kwargs)
- def GitCreateBranch(self, name, branch="", **kwargs):
+ def GitCreateBranch(self, name, remote="", **kwargs):
assert name
- self.Git(MakeArgs(["checkout -b", name, branch]), **kwargs)
+ remote_args = ["--upstream", remote] if remote else []
+ self.Git(MakeArgs(["new-branch", name] + remote_args), **kwargs)
def GitDeleteBranch(self, name, **kwargs):
assert name
diff --git a/tools/push-to-trunk/test_scripts.py b/tools/push-to-trunk/test_scripts.py
index e0e1d86..cb96da7 100644
--- a/tools/push-to-trunk/test_scripts.py
+++ b/tools/push-to-trunk/test_scripts.py
@@ -761,7 +761,8 @@
Cmd("git svn fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git branch", " branch1\n* branch2\n"),
- Cmd("git checkout -b %s svn/bleeding_edge" % TEST_CONFIG["BRANCHNAME"],
+ Cmd(("git new-branch %s --upstream svn/bleeding_edge" %
+ TEST_CONFIG["BRANCHNAME"]),
""),
Cmd("git svn find-rev r123455", "push_hash\n"),
Cmd(("git log -1 --format=%H --grep="
@@ -798,8 +799,8 @@
Cmd("git checkout -f svn/bleeding_edge", ""),
Cmd("git diff svn/trunk push_hash", "patch content\n"),
Cmd("git svn find-rev push_hash", "123455\n"),
- Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG["TRUNKBRANCH"], "",
- cb=ResetToTrunk),
+ Cmd(("git new-branch %s --upstream svn/trunk" %
+ TEST_CONFIG["TRUNKBRANCH"]), "", cb=ResetToTrunk),
Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
Cmd("git checkout -f svn/trunk -- %s" % TEST_CONFIG["CHANGELOG_FILE"], "",
cb=ResetChangeLog),
@@ -908,7 +909,8 @@
Cmd("git svn fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git branch", " branch1\n* branch2\n"),
- Cmd("git checkout -b %s origin/master" % TEST_CONFIG["BRANCHNAME"],
+ Cmd(("git new-branch %s --upstream origin/master" %
+ TEST_CONFIG["BRANCHNAME"]),
""),
Cmd("git svn find-rev r123455", "push_hash\n"),
Cmd(("git log -1 --format=%H --grep="
@@ -931,7 +933,8 @@
Cmd("git checkout -f origin/master", ""),
Cmd("git diff origin/candidates push_hash", "patch content\n"),
Cmd("git svn find-rev push_hash", "123455\n"),
- Cmd("git checkout -b %s origin/candidates" % TEST_CONFIG["TRUNKBRANCH"],
+ Cmd(("git new-branch %s --upstream origin/candidates" %
+ TEST_CONFIG["TRUNKBRANCH"]),
"", cb=ResetToTrunk),
Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
Cmd("git checkout -f origin/candidates -- %s" %
@@ -1021,7 +1024,7 @@
Cmd("gclient sync --nohooks", "syncing...", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
Cmd("git fetch origin", ""),
- Cmd("git checkout -b v8-roll-push_hash", "", cwd=chrome_dir),
+ Cmd("git new-branch v8-roll-push_hash", "", cwd=chrome_dir),
Cmd("roll-dep v8 push_hash", "rolled", cb=WriteDeps, cwd=chrome_dir),
Cmd(("git commit -am \"Update V8 to version 3.22.5 "
"(based on bleeding_edge revision r22622).\n\n"
@@ -1223,7 +1226,8 @@
Cmd("git status -s -b -uno", "## some_branch\n"),
Cmd("git svn fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
- Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG["BRANCHNAME"], ""),
+ Cmd("git new-branch %s --upstream svn/trunk" % TEST_CONFIG["BRANCHNAME"],
+ ""),
Cmd(("git log --format=%H --grep=\"Port r12345\" "
"--reverse svn/bleeding_edge"),
"hash1\nhash2"),
@@ -1353,7 +1357,7 @@
Cmd("git fetch", ""),
Cmd("git svn fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
- Cmd("git checkout -b %s origin/candidates" %
+ Cmd("git new-branch %s --upstream origin/candidates" %
TEST_CONFIG["BRANCHNAME"], ""),
Cmd(("git log --format=%H --grep=\"Port r12345\" "
"--reverse origin/master"),
@@ -1526,7 +1530,7 @@
Cmd("git fetch", ""),
Cmd("git svn fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
- Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], ""),
+ Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
Cmd("git branch -r", " branch-heads/3.21\n branch-heads/3.3\n"),
Cmd("git reset --hard branch-heads/3.3", ""),
Cmd("git log --format=%H", "hash1\nhash_234"),
@@ -1575,7 +1579,7 @@
Cmd("git status -s -uno", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
- Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], "",
+ Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], "",
cwd=chrome_dir),
Cmd("git fetch origin", "", cwd=chrome_v8_dir),
Cmd("git log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\nc_hash3\n",
@@ -1720,7 +1724,7 @@
Cmd(("git log --format=%H --grep="
"\"^git-svn-id: [^@]*@12345 [A-Za-z0-9-]*$\""),
"lkgr_hash"),
- Cmd("git checkout -b auto-bump-up-version lkgr_hash", ""),
+ Cmd("git new-branch auto-bump-up-version --upstream lkgr_hash", ""),
Cmd("git checkout -f master", ""),
Cmd("git branch", "auto-bump-up-version\n* master"),
Cmd("git branch -D auto-bump-up-version", ""),
@@ -1729,7 +1733,7 @@
Cmd("git pull", ""),
URL("https://v8-status.appspot.com/current?format=json",
"{\"message\": \"Tree is open\"}"),
- Cmd("git checkout -b auto-bump-up-version master", "",
+ Cmd("git new-branch auto-bump-up-version --upstream master", "",
cb=ResetVersion(11, 4)),
Cmd("git commit -am \"[Auto-roll] Bump up version to 3.11.6.0\n\n"
"TBR=author@chromium.org\" "