Version 3.30.19 (based on bleeding_edge revision r24889)
Check string literals with escapes in PreParserTraits::GetSymbol() (issue 3606).
Only define ARRAYSIZE_UNSAFE for NaCl builds (Chromium issue 405225).
Performance and stability improvements on all platforms.
Cr-Commit-Position: refs/heads/candidates@{#24890}
git-svn-id: https://v8.googlecode.com/svn/trunk@24890 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 7556d20..6e9f6c7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2014-10-27: Version 3.30.19
+
+ Check string literals with escapes in PreParserTraits::GetSymbol()
+ (issue 3606).
+
Only define ARRAYSIZE_UNSAFE for NaCl builds (Chromium issue 405225).
+
+ Performance and stability improvements on all platforms.
+
+
2014-10-24: Version 3.30.18
Narrow cases where Sparse/Smart versions of Array methods are used
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index da9e9f0..47d705f 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1131,6 +1131,7 @@
__ push(r0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(r0);
// Check for proxies.
@@ -1155,6 +1156,7 @@
__ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1698,6 +1700,7 @@
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in r0.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 5105f1e..ceabe78 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2741,15 +2741,12 @@
int rs = instr->RsValue();
int32_t rs_val = get_register(rs);
int32_t ret_val = 0;
- DCHECK(rs_val != 0);
// udiv
if (instr->Bit(21) == 0x1) {
- ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
- static_cast<uint32_t>(rs_val));
- } else if ((rm_val == kMinInt) && (rs_val == -1)) {
- ret_val = kMinInt;
+ ret_val = bit_cast<int32_t>(base::bits::UnsignedDiv32(
+ bit_cast<uint32_t>(rm_val), bit_cast<uint32_t>(rs_val)));
} else {
- ret_val = rm_val / rs_val;
+ ret_val = base::bits::SignedDiv32(rm_val, rs_val);
}
set_register(rn, ret_val);
return;
diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc
index 1368266..0a8e1ca 100644
--- a/src/arm64/full-codegen-arm64.cc
+++ b/src/arm64/full-codegen-arm64.cc
@@ -1124,6 +1124,7 @@
__ Push(x0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ Bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(x0);
// Check for proxies.
@@ -1147,6 +1148,7 @@
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1678,6 +1680,7 @@
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in x0.
diff --git a/src/array-iterator.js b/src/array-iterator.js
index 5ced9da..864d5c1 100644
--- a/src/array-iterator.js
+++ b/src/array-iterator.js
@@ -112,6 +112,8 @@
%FunctionSetName(ArrayIteratorIterator, '[Symbol.iterator]');
%AddNamedProperty(ArrayIterator.prototype, symbolIterator,
ArrayIteratorIterator, DONT_ENUM);
+ %AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
+ "Array Iterator", READ_ONLY | DONT_ENUM);
}
SetUpArrayIterator();
diff --git a/src/array.js b/src/array.js
index a4e681c..55dd797 100644
--- a/src/array.js
+++ b/src/array.js
@@ -284,11 +284,8 @@
function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = 0; i < del_count; i++) {
var index = start_i + i;
- // The spec could also be interpreted such that %HasOwnProperty
- // would be the appropriate test. We follow KJS in consulting the
- // prototype.
- var current = array[index];
- if (!IS_UNDEFINED(current) || index in array) {
+ if (index in array) {
+ var current = array[index];
// The spec requires [[DefineOwnProperty]] here, %AddElement is close
// enough (in that it ignores the prototype).
%AddElement(deleted_elements, i, current, NONE);
diff --git a/src/ast-value-factory.cc b/src/ast-value-factory.cc
index 4df6ac0..0a1949a 100644
--- a/src/ast-value-factory.cc
+++ b/src/ast-value-factory.cc
@@ -330,13 +330,23 @@
}
-const AstValue* AstValueFactory::NewBoolean(bool b) {
- AstValue* value = new (zone_) AstValue(b);
- if (isolate_) {
- value->Internalize(isolate_);
- }
- values_.Add(value);
+#define GENERATE_VALUE_GETTER(value, initializer) \
+ if (!value) { \
+ value = new (zone_) AstValue(initializer); \
+ if (isolate_) { \
+ value->Internalize(isolate_); \
+ } \
+ values_.Add(value); \
+ } \
return value;
+
+
+const AstValue* AstValueFactory::NewBoolean(bool b) {
+ if (b) {
+ GENERATE_VALUE_GETTER(true_value_, true);
+ } else {
+ GENERATE_VALUE_GETTER(false_value_, false);
+ }
}
@@ -352,35 +362,22 @@
const AstValue* AstValueFactory::NewNull() {
- AstValue* value = new (zone_) AstValue(AstValue::NULL_TYPE);
- if (isolate_) {
- value->Internalize(isolate_);
- }
- values_.Add(value);
- return value;
+ GENERATE_VALUE_GETTER(null_value_, AstValue::NULL_TYPE);
}
const AstValue* AstValueFactory::NewUndefined() {
- AstValue* value = new (zone_) AstValue(AstValue::UNDEFINED);
- if (isolate_) {
- value->Internalize(isolate_);
- }
- values_.Add(value);
- return value;
+ GENERATE_VALUE_GETTER(undefined_value_, AstValue::UNDEFINED);
}
const AstValue* AstValueFactory::NewTheHole() {
- AstValue* value = new (zone_) AstValue(AstValue::THE_HOLE);
- if (isolate_) {
- value->Internalize(isolate_);
- }
- values_.Add(value);
- return value;
+ GENERATE_VALUE_GETTER(the_hole_value_, AstValue::THE_HOLE);
}
+#undef GENERATE_VALUE_GETTER
+
const AstRawString* AstValueFactory::GetString(
uint32_t hash, bool is_one_byte, Vector<const byte> literal_bytes) {
// literal_bytes here points to whatever the user passed, and this is OK
diff --git a/src/ast-value-factory.h b/src/ast-value-factory.h
index de8a442..774e534 100644
--- a/src/ast-value-factory.h
+++ b/src/ast-value-factory.h
@@ -238,7 +238,7 @@
};
-// For generating string constants.
+// For generating constants.
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
@@ -268,6 +268,12 @@
F(use_strict, "use strict") \
F(value, "value")
+#define OTHER_CONSTANTS(F) \
+ F(true_value) \
+ F(false_value) \
+ F(null_value) \
+ F(undefined_value) \
+ F(the_hole_value)
class AstValueFactory {
public:
@@ -276,10 +282,12 @@
zone_(zone),
isolate_(NULL),
hash_seed_(hash_seed) {
-#define F(name, str) \
- name##_string_ = NULL;
+#define F(name, str) name##_string_ = NULL;
STRING_CONSTANTS(F)
#undef F
+#define F(name) name##_ = NULL;
+ OTHER_CONSTANTS(F)
+#undef F
}
Zone* zone() const { return zone_; }
@@ -299,15 +307,15 @@
return isolate_ != NULL;
}
-#define F(name, str) \
- const AstRawString* name##_string() { \
- if (name##_string_ == NULL) { \
- const char* data = str; \
- name##_string_ = GetOneByteString( \
+#define F(name, str) \
+ const AstRawString* name##_string() { \
+ if (name##_string_ == NULL) { \
+ const char* data = str; \
+ name##_string_ = GetOneByteString( \
Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), \
- static_cast<int>(strlen(data)))); \
- } \
- return name##_string_; \
+ static_cast<int>(strlen(data)))); \
+ } \
+ return name##_string_; \
}
STRING_CONSTANTS(F)
#undef F
@@ -338,10 +346,13 @@
uint32_t hash_seed_;
-#define F(name, str) \
- const AstRawString* name##_string_;
+#define F(name, str) const AstRawString* name##_string_;
STRING_CONSTANTS(F)
#undef F
+
+#define F(name) AstValue* name##_;
+ OTHER_CONSTANTS(F)
+#undef F
};
@@ -351,5 +362,6 @@
} } // namespace v8::internal
#undef STRING_CONSTANTS
+#undef OTHER_CONSTANTS
#endif // V8_AST_VALUE_FACTORY_H_
diff --git a/src/ast.h b/src/ast.h
index f997c44..ae7ec1a 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -957,9 +957,11 @@
ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; }
- static int num_ids() { return parent_num_ids() + 2; }
+ static int num_ids() { return parent_num_ids() + 4; }
BailoutId BodyId() const { return BailoutId(local_id(0)); }
BailoutId PrepareId() const { return BailoutId(local_id(1)); }
+ BailoutId EnumId() const { return BailoutId(local_id(2)); }
+ BailoutId ToObjectId() const { return BailoutId(local_id(3)); }
virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
virtual BailoutId StackCheckId() const OVERRIDE { return BodyId(); }
@@ -1568,11 +1570,15 @@
};
struct Accessors: public ZoneObject {
- Accessors() : getter(NULL), setter(NULL) { }
+ Accessors() : getter(NULL), setter(NULL) {}
Expression* getter;
Expression* setter;
};
+ BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
+
+ static int num_ids() { return parent_num_ids() + 1; }
+
protected:
ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
int boilerplate_properties, bool has_function, int pos)
@@ -1582,8 +1588,10 @@
fast_elements_(false),
may_store_doubles_(false),
has_function_(has_function) {}
+ static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
int boilerplate_properties_;
diff --git a/src/base/bits.cc b/src/base/bits.cc
index 2818b93..74d747f 100644
--- a/src/base/bits.cc
+++ b/src/base/bits.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/base/bits.h"
+
+#include <limits>
+
#include "src/base/logging.h"
namespace v8 {
@@ -32,6 +35,19 @@
bit_cast<uint32_t>(SignedMulHigh32(lhs, rhs)));
}
+
+int32_t SignedDiv32(int32_t lhs, int32_t rhs) {
+ if (rhs == 0) return 0;
+ if (rhs == -1) return -lhs;
+ return lhs / rhs;
+}
+
+
+int32_t SignedMod32(int32_t lhs, int32_t rhs) {
+ if (rhs == 0 || rhs == -1) return 0;
+ return lhs % rhs;
+}
+
} // namespace bits
} // namespace base
} // namespace v8
diff --git a/src/base/bits.h b/src/base/bits.h
index da4d1f2..0f4d4c7 100644
--- a/src/base/bits.h
+++ b/src/base/bits.h
@@ -199,6 +199,32 @@
// adds the accumulate value |acc|.
int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc);
+
+// SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
+// truncated to int32. If |rhs| is zero, then zero is returned. If |lhs|
+// is minint and |rhs| is -1, it returns minint.
+int32_t SignedDiv32(int32_t lhs, int32_t rhs);
+
+
+// SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
+// truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
+// is -1, it returns zero.
+int32_t SignedMod32(int32_t lhs, int32_t rhs);
+
+
+// UnsignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
+// truncated to uint32. If |rhs| is zero, then zero is returned.
+inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) {
+ return rhs ? lhs / rhs : 0u;
+}
+
+
+// UnsignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
+// truncated to uint32. If |rhs| is zero, then zero is returned.
+inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
+ return rhs ? lhs % rhs : 0u;
+}
+
} // namespace bits
} // namespace base
} // namespace v8
diff --git a/src/base/flags.h b/src/base/flags.h
index 3f4dfe7..060dba8 100644
--- a/src/base/flags.h
+++ b/src/base/flags.h
@@ -26,8 +26,9 @@
typedef S mask_type;
Flags() : mask_(0) {}
- Flags(flag_type flag) : mask_(flag) {} // NOLINT(runtime/explicit)
- explicit Flags(mask_type mask) : mask_(mask) {}
+ Flags(flag_type flag) // NOLINT(runtime/explicit)
+ : mask_(static_cast<S>(flag)) {}
+ explicit Flags(mask_type mask) : mask_(static_cast<S>(mask)) {}
Flags& operator&=(const Flags& flags) {
mask_ &= flags.mask_;
diff --git a/src/base/macros.h b/src/base/macros.h
index 79cf04c..80a8949 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -25,6 +25,8 @@
(reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
+#if V8_OS_NACL
+
// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
// but can be used on anonymous types or types defined inside
// functions. It's less safe than arraysize as it accepts some
@@ -65,9 +67,6 @@
((sizeof(a) / sizeof(*(a))) / \
static_cast<size_t>(!(sizeof(a) % sizeof(*(a))))) // NOLINT
-
-#if V8_OS_NACL
-
// TODO(bmeurer): For some reason, the NaCl toolchain cannot handle the correct
// definition of arraysize() below, so we have to use the unsafe version for
// now.
@@ -398,4 +397,22 @@
return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
+
+namespace v8 {
+namespace base {
+
+// TODO(yangguo): This is a poor man's replacement for std::is_fundamental,
+// which requires C++11. Switch to std::is_fundamental once possible.
+template <typename T>
+inline bool is_fundamental() {
+ return false;
+}
+
+template <>
+inline bool is_fundamental<uint8_t>() {
+ return true;
+}
+}
+} // namespace v8::base
+
#endif // V8_BASE_MACROS_H_
diff --git a/src/collection-iterator.js b/src/collection-iterator.js
index 2bccc8d..92d45a9 100644
--- a/src/collection-iterator.js
+++ b/src/collection-iterator.js
@@ -77,6 +77,8 @@
%FunctionSetName(SetIteratorSymbolIterator, '[Symbol.iterator]');
%AddNamedProperty(SetIterator.prototype, symbolIterator,
SetIteratorSymbolIterator, DONT_ENUM);
+ %AddNamedProperty(SetIterator.prototype, symbolToStringTag,
+ "Set Iterator", READ_ONLY | DONT_ENUM);
}
SetUpSetIterator();
@@ -174,6 +176,8 @@
%FunctionSetName(MapIteratorSymbolIterator, '[Symbol.iterator]');
%AddNamedProperty(MapIterator.prototype, symbolIterator,
MapIteratorSymbolIterator, DONT_ENUM);
+ %AddNamedProperty(MapIterator.prototype, symbolToStringTag,
+ "Map Iterator", READ_ONLY | DONT_ENUM);
}
SetUpMapIterator();
diff --git a/src/compiler.cc b/src/compiler.cc
index 3cfc193..92331c6 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -414,9 +414,6 @@
compiler::Pipeline pipeline(info());
pipeline.GenerateCode();
if (!info()->code().is_null()) {
- if (FLAG_turbo_deoptimization) {
- info()->context()->native_context()->AddOptimizedCode(*info()->code());
- }
return SetLastStatus(SUCCEEDED);
}
}
@@ -485,6 +482,9 @@
DCHECK(last_status() == SUCCEEDED);
// TODO(turbofan): Currently everything is done in the first phase.
if (!info()->code().is_null()) {
+ if (FLAG_turbo_deoptimization) {
+ info()->context()->native_context()->AddOptimizedCode(*info()->code());
+ }
RecordOptimizationStats();
return last_status();
}
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index ec0c5b8..95e1bde 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -1135,6 +1135,12 @@
VisitFloat64Compare(this, node, &cont);
}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::Flag::kNoFlags;
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 3d27d8d..fd062f2 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -982,16 +982,16 @@
// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
- bool commutative) {
+ bool commutative, ImmediateMode immediate_mode) {
Arm64OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
// Match immediates on left or right side of comparison.
- if (g.CanBeImmediate(right, kArithmeticImm)) {
+ if (g.CanBeImmediate(right, immediate_mode)) {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
cont);
- } else if (g.CanBeImmediate(left, kArithmeticImm)) {
+ } else if (g.CanBeImmediate(left, immediate_mode)) {
if (!commutative) cont->Commute();
VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
cont);
@@ -1004,7 +1004,7 @@
static void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kArm64Cmp32, cont, false);
+ VisitWordCompare(selector, node, kArm64Cmp32, cont, false, kArithmeticImm);
}
@@ -1098,16 +1098,20 @@
return VisitWord32Compare(this, value, &cont);
case IrOpcode::kWord64Equal:
cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ kArithmeticImm);
case IrOpcode::kInt64LessThan:
cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ kArithmeticImm);
case IrOpcode::kInt64LessThanOrEqual:
cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ kArithmeticImm);
case IrOpcode::kUint64LessThan:
cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(this, value, kArm64Cmp, &cont, false);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ kArithmeticImm);
case IrOpcode::kFloat64Equal:
cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(this, value, &cont);
@@ -1145,11 +1149,14 @@
}
break;
case IrOpcode::kInt32Add:
- return VisitWordCompare(this, value, kArm64Cmn32, &cont, true);
+ return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
+ kArithmeticImm);
case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, value, kArm64Cmp32, &cont, false);
+ return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
+ kArithmeticImm);
case IrOpcode::kWord32And:
- return VisitWordCompare(this, value, kArm64Tst32, &cont, true);
+ return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
+ kLogical32Imm);
default:
break;
}
@@ -1169,11 +1176,14 @@
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt32Add:
- return VisitWordCompare(this, value, kArm64Cmn32, &cont, true);
+ return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
+ kArithmeticImm);
case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, value, kArm64Cmp32, &cont, false);
+ return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
+ kArithmeticImm);
case IrOpcode::kWord32And:
- return VisitWordCompare(this, value, kArm64Tst32, &cont, true);
+ return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
+ kLogical32Imm);
default:
break;
}
@@ -1217,14 +1227,15 @@
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord64And:
- return VisitWordCompare(this, value, kArm64Tst, &cont, true);
+ return VisitWordCompare(this, value, kArm64Tst, &cont, true,
+ kLogical64Imm);
default:
break;
}
return VisitWord64Test(this, value, &cont);
}
}
- VisitWordCompare(this, node, kArm64Cmp, &cont, false);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
@@ -1252,19 +1263,19 @@
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
- VisitWordCompare(this, node, kArm64Cmp, &cont, false);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont(kSignedLessThanOrEqual, node);
- VisitWordCompare(this, node, kArm64Cmp, &cont, false);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont(kUnsignedLessThan, node);
- VisitWordCompare(this, node, kArm64Cmp, &cont, false);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
@@ -1285,6 +1296,12 @@
VisitFloat64Compare(this, node, &cont);
}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::Flag::kNoFlags;
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index 50a3720..b972601 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -642,11 +642,14 @@
// Convert object to jsobject.
// PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
obj = NewNode(javascript()->ToObject(), obj);
+ PrepareFrameState(obj, stmt->ToObjectId(), OutputFrameStateCombine::Push());
environment()->Push(obj);
// TODO(dcarney): should do a fast enum cache check here to skip runtime.
environment()->Push(obj);
Node* cache_type = ProcessArguments(
javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), 1);
+ PrepareFrameState(cache_type, stmt->EnumId(),
+ OutputFrameStateCombine::Push());
// TODO(dcarney): these next runtime calls should be removed in favour of
// a few simplified instructions.
environment()->Push(obj);
@@ -882,6 +885,8 @@
const Operator* op =
javascript()->CallRuntime(Runtime::kCreateObjectLiteral, 4);
Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+ PrepareFrameState(literal, expr->CreateLiteralId(),
+ OutputFrameStateCombine::Push());
// The object is expected on the operand stack during computation of the
// property values and is the value of the entire expression.
@@ -943,7 +948,10 @@
if (property->emit_store()) {
const Operator* op =
javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
- NewNode(op, receiver, value);
+ Node* set_prototype = NewNode(op, receiver, value);
+ // SetPrototype should not lazy deopt on an object
+ // literal.
+ PrepareFrameState(set_prototype, BailoutId::None());
}
break;
}
@@ -970,7 +978,8 @@
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
Node* call = NewNode(op, literal, name, getter, setter, attr);
- PrepareFrameState(call, it->first->id());
+ // This should not lazy deopt on a new literal.
+ PrepareFrameState(call, BailoutId::None());
}
// Transform literals that contain functions to fast properties.
@@ -1237,7 +1246,7 @@
receiver_value = NewNode(common()->Projection(1), pair);
PrepareFrameState(pair, expr->EvalOrLookupId(),
- OutputFrameStateCombine::Push());
+ OutputFrameStateCombine::Push(2));
break;
}
case Call::PROPERTY_CALL: {
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index c59ca49..9b077c3 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -67,6 +67,10 @@
return kind_ == kPushOutput && parameter_ == 0;
}
+ size_t ConsumedOutputCount() const {
+ return kind_ == kPushOutput ? GetPushCount() : 1;
+ }
+
bool operator==(OutputFrameStateCombine const& other) const {
return kind_ == other.kind_ && parameter_ == other.parameter_;
}
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 894794b..ca4bf1b 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -877,6 +877,12 @@
VisitFloat64Compare(this, node, &cont);
}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::Flag::kNoFlags;
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index b545959..53d508a 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -128,6 +128,12 @@
return ImmediateOperand::Create(index, zone());
}
+ InstructionOperand* TempLocation(LinkageLocation location, MachineType type) {
+ UnallocatedOperand* op = ToUnallocatedOperand(location, type);
+ op->set_virtual_register(sequence()->NextVirtualRegister());
+ return op;
+ }
+
InstructionOperand* Label(BasicBlock* block) {
// TODO(bmeurer): We misuse ImmediateOperand here.
return TempImmediate(block->rpo_number());
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 17c227a..4baa595 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -219,6 +219,23 @@
}
+void InstructionSelector::MarkAsRepresentation(MachineType rep,
+ InstructionOperand* op) {
+ UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
+ switch (RepresentationOf(rep)) {
+ case kRepFloat32:
+ case kRepFloat64:
+ sequence()->MarkAsDouble(unalloc->virtual_register());
+ break;
+ case kRepTagged:
+ sequence()->MarkAsReference(unalloc->virtual_register());
+ break;
+ default:
+ break;
+ }
+}
+
+
void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
DCHECK_NOT_NULL(node);
switch (RepresentationOf(rep)) {
@@ -274,15 +291,27 @@
}
// Filter out the outputs that aren't live because no projection uses them.
+ size_t outputs_needed_by_framestate =
+ buffer->frame_state_descriptor == NULL
+ ? 0
+ : buffer->frame_state_descriptor->state_combine()
+ .ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
- if (buffer->output_nodes[i] != NULL) {
- Node* output = buffer->output_nodes[i];
+ bool output_is_live =
+ buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
+ if (output_is_live) {
MachineType type =
buffer->descriptor->GetReturnType(static_cast<int>(i));
LinkageLocation location =
buffer->descriptor->GetReturnLocation(static_cast<int>(i));
- MarkAsRepresentation(type, output);
- buffer->outputs.push_back(g.DefineAsLocation(output, location, type));
+
+ Node* output = buffer->output_nodes[i];
+ InstructionOperand* op =
+ output == NULL ? g.TempLocation(location, type)
+ : g.DefineAsLocation(output, location, type);
+ MarkAsRepresentation(type, op);
+
+ buffer->outputs.push_back(op);
}
}
}
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index cc06f10..5af2c9d 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -85,6 +85,9 @@
return Features(CpuFeatures::SupportedFeatures());
}
+ // TODO(sigurds) This should take a CpuFeatures argument.
+ static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
+
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -136,6 +139,10 @@
// by {node}.
void MarkAsRepresentation(MachineType rep, Node* node);
+ // Inform the register allocation of the representation of the unallocated
+ // operand {op}.
+ void MarkAsRepresentation(MachineType rep, InstructionOperand* op);
+
// Initialize the call buffer with the InstructionOperands, nodes, etc,
// corresponding
// to the inputs and outputs of the call.
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 7f49faf..9240b06 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -275,8 +275,7 @@
const LoadPropertyParameters& p = LoadPropertyParametersOf(node->op());
Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
if (FLAG_vector_ics) {
- PatchInsertInput(node, 2,
- jsgraph()->SmiConstant(p.feedback().slot().ToInt()));
+ PatchInsertInput(node, 2, jsgraph()->SmiConstant(p.feedback().index()));
PatchInsertInput(node, 3, jsgraph()->HeapConstant(p.feedback().vector()));
}
ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
@@ -289,8 +288,7 @@
CodeFactory::LoadICInOptimizedCode(isolate(), p.contextual_mode());
PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name()));
if (FLAG_vector_ics) {
- PatchInsertInput(node, 2,
- jsgraph()->SmiConstant(p.feedback().slot().ToInt()));
+ PatchInsertInput(node, 2, jsgraph()->SmiConstant(p.feedback().index()));
PatchInsertInput(node, 3, jsgraph()->HeapConstant(p.feedback().vector()));
}
ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index b659292..1ce8173 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -104,6 +104,8 @@
Handle<TypeFeedbackVector> vector() const { return vector_; }
FeedbackVectorICSlot slot() const { return slot_; }
+ int index() const { return vector_->GetIndex(slot_); }
+
private:
const Handle<TypeFeedbackVector> vector_;
const FeedbackVectorICSlot slot_;
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 5b7bc68..4239f9f 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -125,6 +125,7 @@
case Runtime::kCompileLazy:
case Runtime::kCompileOptimized:
case Runtime::kCompileString:
+ case Runtime::kCreateObjectLiteral:
case Runtime::kDebugBreak:
case Runtime::kDataViewSetInt8:
case Runtime::kDataViewSetUint8:
@@ -143,23 +144,38 @@
case Runtime::kDataViewGetFloat32:
case Runtime::kDataViewGetFloat64:
case Runtime::kDebugEvaluate:
+ case Runtime::kDebugEvaluateGlobal:
case Runtime::kDebugGetLoadedScripts:
case Runtime::kDebugGetPropertyDetails:
case Runtime::kDebugPromiseEvent:
+ case Runtime::kDefineAccessorPropertyUnchecked:
+ case Runtime::kDefineDataPropertyUnchecked:
case Runtime::kDeleteProperty:
case Runtime::kDeoptimizeFunction:
case Runtime::kFunctionBindArguments:
+ case Runtime::kGetDefaultReceiver:
case Runtime::kGetFrameCount:
case Runtime::kGetOwnProperty:
+ case Runtime::kGetOwnPropertyNames:
+ case Runtime::kGetPropertyNamesFast:
+ case Runtime::kGetPrototype:
+ case Runtime::kInlineArguments:
case Runtime::kInlineCallFunction:
case Runtime::kInlineDateField:
case Runtime::kInlineRegExpExec:
+ case Runtime::kInternalSetPrototype:
+ case Runtime::kInterrupt:
+ case Runtime::kIsPropertyEnumerable:
+ case Runtime::kIsSloppyModeFunction:
case Runtime::kLiveEditGatherCompileInfo:
case Runtime::kLoadLookupSlot:
case Runtime::kLoadLookupSlotNoReferenceError:
case Runtime::kMaterializeRegExpLiteral:
+ case Runtime::kNewObject:
case Runtime::kNewObjectFromBound:
+ case Runtime::kNewObjectWithAllocationSite:
case Runtime::kObjectFreeze:
+ case Runtime::kOwnKeys:
case Runtime::kParseJson:
case Runtime::kPrepareStep:
case Runtime::kPreventExtensions:
@@ -168,22 +184,28 @@
case Runtime::kRegExpCompile:
case Runtime::kRegExpExecMultiple:
case Runtime::kResolvePossiblyDirectEval:
- // case Runtime::kSetPrototype:
+ case Runtime::kSetPrototype:
case Runtime::kSetScriptBreakPoint:
+ case Runtime::kSparseJoinWithSeparator:
case Runtime::kStackGuard:
+ case Runtime::kStoreKeyedToSuper_Sloppy:
+ case Runtime::kStoreKeyedToSuper_Strict:
+ case Runtime::kStoreToSuper_Sloppy:
+ case Runtime::kStoreToSuper_Strict:
case Runtime::kStoreLookupSlot:
case Runtime::kStringBuilderConcat:
+ case Runtime::kStringBuilderJoin:
case Runtime::kStringReplaceGlobalRegExpWithString:
+ case Runtime::kThrowNonMethodError:
+ case Runtime::kThrowNotDateError:
case Runtime::kThrowReferenceError:
+ case Runtime::kThrowUnsupportedSuperError:
case Runtime::kThrow:
case Runtime::kTypedArraySetFastCases:
case Runtime::kTypedArrayInitializeFromArrayLike:
- case Runtime::kDebugEvaluateGlobal:
- case Runtime::kOwnKeys:
- case Runtime::kGetOwnPropertyNames:
- case Runtime::kIsPropertyEnumerable:
- case Runtime::kGetPrototype:
- case Runtime::kSparseJoinWithSeparator:
+#ifdef V8_I18N_SUPPORT
+ case Runtime::kGetImplFromInitializedIntlObject:
+#endif
return true;
default:
return false;
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index a8b5edd..f285b8a 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -58,6 +58,11 @@
}
+Node* MachineOperatorReducer::Word32Equal(Node* lhs, Node* rhs) {
+ return graph()->NewNode(machine()->Word32Equal(), lhs, rhs);
+}
+
+
Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) {
return graph()->NewNode(machine()->Int32Add(), lhs, rhs);
}
@@ -299,40 +304,12 @@
}
case IrOpcode::kInt32Div:
return ReduceInt32Div(node);
- case IrOpcode::kUint32Div: {
- Uint32BinopMatcher m(node);
- if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
- // TODO(turbofan): if (m.left().Is(0))
- // TODO(turbofan): if (m.right().Is(0))
- // TODO(turbofan): if (m.LeftEqualsRight())
- if (m.IsFoldable() && !m.right().Is(0)) { // K / K => K
- return ReplaceInt32(m.left().Value() / m.right().Value());
- }
- if (m.right().IsPowerOf2()) { // x / 2^n => x >> n
- node->set_op(machine()->Word32Shr());
- node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
- return Changed(node);
- }
- break;
- }
+ case IrOpcode::kUint32Div:
+ return ReduceUint32Div(node);
case IrOpcode::kInt32Mod:
return ReduceInt32Mod(node);
- case IrOpcode::kUint32Mod: {
- Uint32BinopMatcher m(node);
- if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
- // TODO(turbofan): if (m.left().Is(0))
- // TODO(turbofan): if (m.right().Is(0))
- // TODO(turbofan): if (m.LeftEqualsRight())
- if (m.IsFoldable() && !m.right().Is(0)) { // K % K => K
- return ReplaceInt32(m.left().Value() % m.right().Value());
- }
- if (m.right().IsPowerOf2()) { // x % 2^n => x & 2^n-1
- node->set_op(machine()->Word32And());
- node->ReplaceInput(1, Int32Constant(m.right().Value() - 1));
- return Changed(node);
- }
- break;
- }
+ case IrOpcode::kUint32Mod:
+ return ReduceUint32Mod(node);
case IrOpcode::kInt32LessThan: {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K < K => K
@@ -554,13 +531,16 @@
Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
Int32BinopMatcher m(node);
+ if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0
if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
- // TODO(turbofan): if (m.left().Is(0))
- // TODO(turbofan): if (m.LeftEqualsRight())
- if (m.IsFoldable() && !m.right().Is(0)) { // K / K => K
- if (m.right().Is(-1)) return ReplaceInt32(-m.left().Value());
- return ReplaceInt32(m.left().Value() / m.right().Value());
+ if (m.IsFoldable()) { // K / K => K
+ return ReplaceInt32(
+ base::bits::SignedDiv32(m.left().Value(), m.right().Value()));
+ }
+ if (m.LeftEqualsRight()) { // x / x => x != 0
+ Node* const zero = Int32Constant(0);
+ return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
}
if (m.right().Is(-1)) { // x / -1 => 0 - x
node->set_op(machine()->Int32Sub());
@@ -595,15 +575,38 @@
}
+Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
+ Uint32BinopMatcher m(node);
+ if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0
+ if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
+ if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
+ if (m.IsFoldable()) { // K / K => K
+ return ReplaceUint32(
+ base::bits::UnsignedDiv32(m.left().Value(), m.right().Value()));
+ }
+ if (m.LeftEqualsRight()) { // x / x => x != 0
+ Node* const zero = Int32Constant(0);
+ return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
+ }
+ if (m.right().IsPowerOf2()) { // x / 2^n => x >> n
+ node->set_op(machine()->Word32Shr());
+ node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
Int32BinopMatcher m(node);
- if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
- if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0
- // TODO(turbofan): if (m.left().Is(0))
- // TODO(turbofan): if (m.right().Is(0))
- // TODO(turbofan): if (m.LeftEqualsRight())
- if (m.IsFoldable() && !m.right().Is(0)) { // K % K => K
- return ReplaceInt32(m.left().Value() % m.right().Value());
+ if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0
+ if (m.right().Is(0)) return Replace(m.right().node()); // x % 0 => 0
+ if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
+ if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0
+ if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
+ if (m.IsFoldable()) { // K % K => K
+ return ReplaceInt32(
+ base::bits::SignedMod32(m.left().Value(), m.right().Value()));
}
if (m.right().HasValue()) {
Node* const dividend = m.left().node();
@@ -639,6 +642,25 @@
}
+Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
+ Uint32BinopMatcher m(node);
+ if (m.left().Is(0)) return Replace(m.left().node()); // 0 % x => 0
+ if (m.right().Is(0)) return Replace(m.right().node()); // x % 0 => 0
+ if (m.right().Is(1)) return ReplaceUint32(0); // x % 1 => 0
+ if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
+ if (m.IsFoldable()) { // K % K => K
+ return ReplaceUint32(
+ base::bits::UnsignedMod32(m.left().Value(), m.right().Value()));
+ }
+ if (m.right().IsPowerOf2()) { // x % 2^n => x & 2^n-1
+ node->set_op(machine()->Word32And());
+ node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow: {
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 92c2337..475ed2e 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -37,6 +37,7 @@
Node* Word32And(Node* lhs, uint32_t rhs);
Node* Word32Sar(Node* lhs, uint32_t rhs);
Node* Word32Shr(Node* lhs, uint32_t rhs);
+ Node* Word32Equal(Node* lhs, Node* rhs);
Node* Int32Add(Node* lhs, Node* rhs);
Node* Int32Sub(Node* lhs, Node* rhs);
Node* Int32Mul(Node* lhs, Node* rhs);
@@ -53,12 +54,17 @@
Reduction ReplaceInt32(int32_t value) {
return Replace(Int32Constant(value));
}
+ Reduction ReplaceUint32(uint32_t value) {
+ return Replace(Uint32Constant(value));
+ }
Reduction ReplaceInt64(int64_t value) {
return Replace(Int64Constant(value));
}
Reduction ReduceInt32Div(Node* node);
+ Reduction ReduceUint32Div(Node* node);
Reduction ReduceInt32Mod(Node* node);
+ Reduction ReduceUint32Mod(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
Graph* graph() const;
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index b5da829..6e1180f 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -110,6 +110,10 @@
V(Float64Div, Operator::kNoProperties, 2, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 1) \
V(Float64Sqrt, Operator::kNoProperties, 1, 1) \
+ V(Float64Ceil, Operator::kNoProperties, 1, 1) \
+ V(Float64Floor, Operator::kNoProperties, 1, 1) \
+ V(Float64RoundTruncate, Operator::kNoProperties, 1, 1) \
+ V(Float64RoundTiesAway, Operator::kNoProperties, 1, 1) \
V(Float64Equal, Operator::kCommutative, 2, 1) \
V(Float64LessThan, Operator::kNoProperties, 2, 1) \
V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 1) \
@@ -188,8 +192,8 @@
LAZY_INSTANCE_INITIALIZER;
-MachineOperatorBuilder::MachineOperatorBuilder(MachineType word)
- : impl_(kImpl.Get()), word_(word) {
+MachineOperatorBuilder::MachineOperatorBuilder(MachineType word, Flags flags)
+ : impl_(kImpl.Get()), word_(word), flags_(flags) {
DCHECK(word == kRepWord32 || word == kRepWord64);
}
@@ -236,7 +240,6 @@
UNREACHABLE();
return NULL;
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 568d3eb..1951446 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
#define V8_COMPILER_MACHINE_OPERATOR_H_
+#include "src/base/flags.h"
#include "src/compiler/machine-type.h"
namespace v8 {
@@ -57,7 +58,19 @@
// for generating code to run on architectures such as ia32, x64, arm, etc.
class MachineOperatorBuilder FINAL {
public:
- explicit MachineOperatorBuilder(MachineType word = kMachPtr);
+ // Flags that specify which operations are available. This is useful
+ // for operations that are unsupported by some back-ends.
+ enum class Flag : unsigned {
+ kNoFlags = 0,
+ kFloat64Floor = 1 << 0,
+ kFloat64Ceil = 1 << 1,
+ kFloat64RoundTruncate = 1 << 2,
+ kFloat64RoundTiesAway = 1 << 3
+ };
+ typedef base::Flags<Flag, unsigned> Flags;
+
+ explicit MachineOperatorBuilder(MachineType word = kMachPtr,
+ Flags supportedOperators = Flag::kNoFlags);
const Operator* Word32And();
const Operator* Word32Or();
@@ -135,6 +148,20 @@
const Operator* Float64LessThan();
const Operator* Float64LessThanOrEqual();
+ // Floating point rounding.
+ const Operator* Float64Floor();
+ const Operator* Float64Ceil();
+ const Operator* Float64RoundTruncate();
+ const Operator* Float64RoundTiesAway();
+ bool HasFloat64Floor() { return flags_ & Flag::kFloat64Floor; }
+ bool HasFloat64Ceil() { return flags_ & Flag::kFloat64Ceil; }
+ bool HasFloat64RoundTruncate() {
+ return flags_ & Flag::kFloat64RoundTruncate;
+ }
+ bool HasFloat64RoundTiesAway() {
+ return flags_ & Flag::kFloat64RoundTiesAway;
+ }
+
// load [base + index]
const Operator* Load(LoadRepresentation rep);
@@ -181,8 +208,11 @@
private:
const MachineOperatorBuilderImpl& impl_;
const MachineType word_;
+ const Flags flags_;
};
+
+DEFINE_OPERATORS_FOR_FLAGS(MachineOperatorBuilder::Flags)
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 5ad9930..dc1749a 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -647,6 +647,12 @@
VisitFloat64Compare(this, node, &cont);
}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::Flag::kNoFlags;
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index cbc0d07..4853110 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -226,6 +226,10 @@
V(Float64Equal) \
V(Float64LessThan) \
V(Float64LessThanOrEqual) \
+ V(Float64Floor) \
+ V(Float64Ceil) \
+ V(Float64RoundTruncate) \
+ V(Float64RoundTiesAway) \
V(LoadStackPointer)
#define VALUE_OP_LIST(V) \
diff --git a/src/compiler/operator-properties-inl.h b/src/compiler/operator-properties-inl.h
index 4d85171..771f560 100644
--- a/src/compiler/operator-properties-inl.h
+++ b/src/compiler/operator-properties-inl.h
@@ -81,6 +81,9 @@
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSSubtract:
+ // Conversions
+ case IrOpcode::kJSToObject:
+
// Other
case IrOpcode::kJSDeleteProperty:
return true;
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 3b33744..a46686b 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -178,7 +178,8 @@
// construction. This is currently only needed for the node cache, which the
// typer could sweep over later.
Typer typer(&graph, info()->context());
- MachineOperatorBuilder machine;
+ MachineOperatorBuilder machine(
+ kMachPtr, InstructionSelector::SupportedMachineOperatorFlags());
CommonOperatorBuilder common(zone());
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(&graph, &common, &javascript, &machine);
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index ede6eaa..51400cf 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -51,7 +51,6 @@
void BasicBlock::set_control(Control control) {
- DCHECK(control_ == BasicBlock::kNone);
control_ = control;
}
@@ -215,12 +214,42 @@
}
+void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
+ BasicBlock* tblock, BasicBlock* fblock) {
+ DCHECK(block->control() != BasicBlock::kNone);
+ DCHECK(end->control() == BasicBlock::kNone);
+ end->set_control(block->control());
+ block->set_control(BasicBlock::kBranch);
+ MoveSuccessors(block, end);
+ AddSuccessor(block, tblock);
+ AddSuccessor(block, fblock);
+ if (block->control_input() != NULL) {
+ SetControlInput(end, block->control_input());
+ }
+ SetControlInput(block, branch);
+}
+
+
void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
block->AddSuccessor(succ);
succ->AddPredecessor(block);
}
+void Schedule::MoveSuccessors(BasicBlock* from, BasicBlock* to) {
+ for (BasicBlock::Predecessors::iterator i = from->successors_begin();
+ i != from->successors_end(); ++i) {
+ BasicBlock* succ = *i;
+ to->AddSuccessor(succ);
+ for (BasicBlock::Predecessors::iterator j = succ->predecessors_begin();
+ j != succ->predecessors_end(); ++j) {
+ if (*j == from) *j = to;
+ }
+ }
+ from->ClearSuccessors();
+}
+
+
void Schedule::SetControlInput(BasicBlock* block, Node* node) {
block->set_control_input(node);
SetBlockForNode(block, node);
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index 5ff554f..e6076ce 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -95,6 +95,7 @@
}
size_t PredecessorCount() const { return predecessors_.size(); }
BasicBlock* PredecessorAt(size_t index) { return predecessors_[index]; }
+ void ClearPredecessors() { predecessors_.clear(); }
void AddPredecessor(BasicBlock* predecessor);
typedef ZoneVector<BasicBlock*> Successors;
@@ -108,6 +109,7 @@
}
size_t SuccessorCount() const { return successors_.size(); }
BasicBlock* SuccessorAt(size_t index) { return successors_[index]; }
+ void ClearSuccessors() { successors_.clear(); }
void AddSuccessor(BasicBlock* successor);
// Nodes in the basic block.
@@ -240,7 +242,14 @@
// BasicBlock building: add a throw at the end of {block}.
void AddThrow(BasicBlock* block, Node* input);
- void AddSuccessor(BasicBlock* block, BasicBlock* succ);
+ // BasicBlock mutation: insert a branch into the end of {block}.
+ void InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
+ BasicBlock* tblock, BasicBlock* fblock);
+
+ // Exposed publicly for testing only.
+ void AddSuccessorForTesting(BasicBlock* block, BasicBlock* succ) {
+ return AddSuccessor(block, succ);
+ }
BasicBlockVector* rpo_order() { return &rpo_order_; }
const BasicBlockVector* rpo_order() const { return &rpo_order_; }
@@ -256,6 +265,9 @@
friend class ScheduleVisualizer;
friend class BasicBlockInstrumentor;
+ void AddSuccessor(BasicBlock* block, BasicBlock* succ);
+ void MoveSuccessors(BasicBlock* from, BasicBlock* to);
+
void SetControlInput(BasicBlock* block, Node* node);
void SetBlockForNode(BasicBlock* block, Node* node);
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 6975882..a75a9ef 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -239,6 +239,7 @@
switch (node->opcode()) {
case IrOpcode::kLoop:
case IrOpcode::kMerge:
+ case IrOpcode::kTerminate:
BuildBlockForNode(node);
break;
case IrOpcode::kBranch:
@@ -561,15 +562,26 @@
BasicBlockVector* final_order = schedule_->rpo_order();
order->Serialize(final_order);
- // Compute the correct loop header for every block and set the correct loop
- // ends.
+ // Compute the correct loop headers and set the correct loop ends.
LoopInfo* current_loop = NULL;
BasicBlock* current_header = NULL;
int loop_depth = 0;
for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
++i) {
BasicBlock* current = *i;
+
+ // Finish the previous loop(s) if we just exited them.
+ while (current_header != NULL &&
+ current->rpo_number() >= current_header->loop_end()) {
+ DCHECK(current_header->IsLoopHeader());
+ DCHECK(current_loop != NULL);
+ current_loop = current_loop->prev;
+ current_header = current_loop == NULL ? NULL : current_loop->header;
+ --loop_depth;
+ }
current->set_loop_header(current_header);
+
+ // Push a new loop onto the stack if this loop is a loop header.
if (current->IsLoopHeader()) {
loop_depth++;
current_loop = &loops[current->loop_end()];
@@ -580,17 +592,10 @@
current_header = current_loop->header;
Trace("B%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
- } else {
- while (current_header != NULL &&
- current->rpo_number() >= current_header->loop_end()) {
- DCHECK(current_header->IsLoopHeader());
- DCHECK(current_loop != NULL);
- current_loop = current_loop->prev;
- current_header = current_loop == NULL ? NULL : current_loop->header;
- --loop_depth;
- }
}
+
current->set_loop_depth(loop_depth);
+
if (current->loop_header() == NULL) {
Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
current->loop_depth());
@@ -755,6 +760,12 @@
os << " range: [" << block->rpo_number() << ", " << block->loop_end()
<< ")";
}
+ if (block->loop_header() != NULL) {
+ os << " header: B" << block->loop_header()->id();
+ }
+ if (block->loop_depth() > 0) {
+ os << " depth: " << block->loop_depth();
+ }
os << "\n";
}
}
@@ -774,6 +785,7 @@
DCHECK(header->loop_end() >= 0);
DCHECK(header->loop_end() <= static_cast<int>(order->size()));
DCHECK(header->loop_end() > header->rpo_number());
+ DCHECK(header->loop_header() != header);
// Verify the start ... end list relationship.
int links = 0;
@@ -1073,31 +1085,29 @@
// Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
// into enclosing loop pre-headers until they would preceed their
// ScheduleEarly position.
- BasicBlock* hoist_block = block;
+ BasicBlock* hoist_block = GetPreHeader(block);
while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) {
- if (hoist_block->loop_depth() < block->loop_depth()) {
- block = hoist_block;
- Trace(" hoisting #%d:%s to block %d\n", node->id(),
- node->op()->mnemonic(), block->id().ToInt());
- }
- // Try to hoist to the pre-header of the loop header.
- hoist_block = hoist_block->loop_header();
- if (hoist_block != NULL) {
- BasicBlock* pre_header = hoist_block->dominator();
- DCHECK(pre_header == NULL ||
- *hoist_block->predecessors_begin() == pre_header);
- Trace(
- " hoist to pre-header B%d of loop header B%d, depth would be %d\n",
- pre_header->id().ToInt(), hoist_block->id().ToInt(),
- pre_header->loop_depth());
- hoist_block = pre_header;
- }
+ Trace(" hoisting #%d:%s to block %d\n", node->id(),
+ node->op()->mnemonic(), hoist_block->id().ToInt());
+ DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
+ block = hoist_block;
+ hoist_block = GetPreHeader(hoist_block);
}
ScheduleNode(block, node);
}
private:
+ BasicBlock* GetPreHeader(BasicBlock* block) {
+ if (block->IsLoopHeader()) {
+ return block->dominator();
+ } else if (block->loop_header() != NULL) {
+ return block->loop_header()->dominator();
+ } else {
+ return NULL;
+ }
+ }
+
BasicBlock* GetCommonDominatorOfUses(Node* node) {
BasicBlock* block = NULL;
Node::Uses uses = node->uses();
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index 9204e88..1a4ca96 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -915,6 +915,10 @@
case IrOpcode::kFloat64Mod:
return VisitFloat64Binop(node);
case IrOpcode::kFloat64Sqrt:
+ case IrOpcode::kFloat64Floor:
+ case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundTruncate:
+ case IrOpcode::kFloat64RoundTiesAway:
return VisitUnop(node, kMachFloat64, kMachFloat64);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 2030b7c..b4a62a1 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -1770,6 +1770,30 @@
}
+Bounds Typer::Visitor::TypeFloat64Floor(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Ceil(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Bounds(Type::Number());
+}
+
+
Bounds Typer::Visitor::TypeLoadStackPointer(Node* node) {
return Bounds(Type::Internal());
}
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index d0b40e7..0bae1ef 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -702,6 +702,10 @@
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
case IrOpcode::kFloat64Sqrt:
+ case IrOpcode::kFloat64Floor:
+ case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundTruncate:
+ case IrOpcode::kFloat64RoundTiesAway:
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 03240c2..78713a0 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -1108,6 +1108,12 @@
VisitFloat64Compare(this, node, &cont);
}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::Flag::kNoFlags;
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index ed7eabc..6951d8c 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -462,7 +462,7 @@
DEFINE_BOOL(trace_stub_failures, false,
"trace deoptimization of generated code stubs")
-DEFINE_BOOL(serialize_toplevel, false, "enable caching of toplevel scripts")
+DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
DEFINE_BOOL(trace_code_serializer, false, "print code serializer trace")
// compiler.cc
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index c4f9600..680e1cc 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -1711,11 +1711,11 @@
AccessorPair* accessors = AccessorPair::cast(callback_obj);
Object* getter = accessors->getter();
if (!getter->IsOddball()) {
- SetPropertyReference(js_obj, entry, String::cast(key), getter, "get %s");
+ SetPropertyReference(js_obj, entry, Name::cast(key), getter, "get %s");
}
Object* setter = accessors->setter();
if (!setter->IsOddball()) {
- SetPropertyReference(js_obj, entry, String::cast(key), setter, "set %s");
+ SetPropertyReference(js_obj, entry, Name::cast(key), setter, "set %s");
}
return true;
}
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 38e4971..94c8937 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -4551,6 +4551,19 @@
}
+bool Heap::RootIsImmortalImmovable(int root_index) {
+ switch (root_index) {
+#define CASE(name) \
+ case Heap::k##name##RootIndex: \
+ return true;
+ IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
+#undef CASE
+ default:
+ return false;
+ }
+}
+
+
#ifdef VERIFY_HEAP
void Heap::Verify() {
CHECK(HasBeenSetUp());
diff --git a/src/heap/heap.h b/src/heap/heap.h
index f3830dd..ee1fca9 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -198,58 +198,6 @@
SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
-// Heap roots that are known to be immortal immovable, for which we can safely
-// skip write barriers.
-#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
- V(byte_array_map) \
- V(free_space_map) \
- V(one_pointer_filler_map) \
- V(two_pointer_filler_map) \
- V(undefined_value) \
- V(the_hole_value) \
- V(null_value) \
- V(true_value) \
- V(false_value) \
- V(uninitialized_value) \
- V(cell_map) \
- V(global_property_cell_map) \
- V(shared_function_info_map) \
- V(meta_map) \
- V(heap_number_map) \
- V(mutable_heap_number_map) \
- V(native_context_map) \
- V(fixed_array_map) \
- V(code_map) \
- V(scope_info_map) \
- V(fixed_cow_array_map) \
- V(fixed_double_array_map) \
- V(constant_pool_array_map) \
- V(weak_cell_map) \
- V(no_interceptor_result_sentinel) \
- V(hash_table_map) \
- V(ordered_hash_table_map) \
- V(empty_fixed_array) \
- V(empty_byte_array) \
- V(empty_descriptor_array) \
- V(empty_constant_pool_array) \
- V(arguments_marker) \
- V(symbol_map) \
- V(sloppy_arguments_elements_map) \
- V(function_context_map) \
- V(catch_context_map) \
- V(with_context_map) \
- V(block_context_map) \
- V(module_context_map) \
- V(global_context_map) \
- V(undefined_map) \
- V(the_hole_map) \
- V(null_map) \
- V(boolean_map) \
- V(uninitialized_map) \
- V(message_object_map) \
- V(foreign_map) \
- V(neander_map)
-
#define INTERNALIZED_STRING_LIST(V) \
V(Object_string, "Object") \
V(proto_string, "__proto__") \
@@ -351,6 +299,60 @@
V(class_start_position_symbol) \
V(class_end_position_symbol)
+// Heap roots that are known to be immortal immovable, for which we can safely
+// skip write barriers. This list is not complete and has omissions.
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+ V(ByteArrayMap) \
+ V(FreeSpaceMap) \
+ V(OnePointerFillerMap) \
+ V(TwoPointerFillerMap) \
+ V(UndefinedValue) \
+ V(TheHoleValue) \
+ V(NullValue) \
+ V(TrueValue) \
+ V(FalseValue) \
+ V(UninitializedValue) \
+ V(CellMap) \
+ V(GlobalPropertyCellMap) \
+ V(SharedFunctionInfoMap) \
+ V(MetaMap) \
+ V(HeapNumberMap) \
+ V(MutableHeapNumberMap) \
+ V(NativeContextMap) \
+ V(FixedArrayMap) \
+ V(CodeMap) \
+ V(ScopeInfoMap) \
+ V(FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap) \
+ V(ConstantPoolArrayMap) \
+ V(WeakCellMap) \
+ V(NoInterceptorResultSentinel) \
+ V(HashTableMap) \
+ V(OrderedHashTableMap) \
+ V(EmptyFixedArray) \
+ V(EmptyByteArray) \
+ V(EmptyDescriptorArray) \
+ V(EmptyConstantPoolArray) \
+ V(ArgumentsMarker) \
+ V(SymbolMap) \
+ V(SloppyArgumentsElementsMap) \
+ V(FunctionContextMap) \
+ V(CatchContextMap) \
+ V(WithContextMap) \
+ V(BlockContextMap) \
+ V(ModuleContextMap) \
+ V(GlobalContextMap) \
+ V(UndefinedMap) \
+ V(TheHoleMap) \
+ V(NullMap) \
+ V(BooleanMap) \
+ V(UninitializedMap) \
+ V(ArgumentsMarkerMap) \
+ V(JSMessageObjectMap) \
+ V(ForeignMap) \
+ V(NeanderMap) \
+ PRIVATE_SYMBOL_LIST(V)
+
// Forward declarations.
class HeapStats;
class Isolate;
@@ -928,6 +930,8 @@
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
+ static bool RootIsImmortalImmovable(int root_index);
+
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
void Verify();
@@ -1116,6 +1120,8 @@
kSmiRootsStart = kStringTableRootIndex + 1
};
+ Object* root(RootListIndex index) { return roots_[index]; }
+
STATIC_ASSERT(kUndefinedValueRootIndex ==
Internals::kUndefinedValueRootIndex);
STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 1544bad..188119d 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -2862,7 +2862,7 @@
DCHECK(!object_.IsKnownGlobal(heap->nan_value()));
return
#define IMMORTAL_IMMOVABLE_ROOT(name) \
- object_.IsKnownGlobal(heap->name()) ||
+ object_.IsKnownGlobal(heap->root(Heap::k##name##RootIndex)) ||
IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
#undef IMMORTAL_IMMOVABLE_ROOT
#define INTERNALIZED_STRING(name, value) \
@@ -2873,9 +2873,6 @@
object_.IsKnownGlobal(heap->name##_map()) ||
STRING_TYPE_LIST(STRING_TYPE)
#undef STRING_TYPE
-#define SYMBOL(name) object_.IsKnownGlobal(heap->name()) ||
- PRIVATE_SYMBOL_LIST(SYMBOL)
-#undef SYMBOL
false;
}
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index e5618a7..c033fd5 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1062,6 +1062,7 @@
__ push(eax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(eax);
// Check for proxies.
@@ -1083,6 +1084,7 @@
__ bind(&call_runtime);
__ push(eax);
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
__ j(not_equal, &fixed_array);
@@ -1629,6 +1631,7 @@
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in eax.
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
index 9ba5663..1f6eb4e 100644
--- a/src/ic/ic-compiler.cc
+++ b/src/ic/ic-compiler.cc
@@ -92,6 +92,20 @@
Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<Code> stub = ComputeKeyedLoadMonomorphicHandler(receiver_map);
+ PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
+ Handle<Code> code =
+ compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub,
+ isolate->factory()->empty_string(), ELEMENT);
+
+ Map::UpdateCodeCache(receiver_map, name, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
+ Handle<Map> receiver_map) {
+ Isolate* isolate = receiver_map->GetIsolate();
ElementsKind elements_kind = receiver_map->elements_kind();
Handle<Code> stub;
if (receiver_map->has_indexed_interceptor()) {
@@ -110,13 +124,7 @@
} else {
stub = LoadDictionaryElementStub(isolate).GetCode();
}
- PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code =
- compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub,
- isolate->factory()->empty_string(), ELEMENT);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- return code;
+ return stub;
}
diff --git a/src/ic/ic-compiler.h b/src/ic/ic-compiler.h
index 97c07d0..dd898ae 100644
--- a/src/ic/ic-compiler.h
+++ b/src/ic/ic-compiler.h
@@ -34,6 +34,8 @@
ExtraICState extra_ic_state);
// Keyed
+ static Handle<Code> ComputeKeyedLoadMonomorphicHandler(
+ Handle<Map> receiver_map);
static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map);
static Handle<Code> ComputeKeyedStoreMonomorphic(
diff --git a/src/json.js b/src/json.js
index f767f4a..e2b7dc8 100644
--- a/src/json.js
+++ b/src/json.js
@@ -220,6 +220,8 @@
function SetUpJSON() {
%CheckIsBootstrapping();
+ %AddNamedProperty($JSON, symbolToStringTag, "JSON", READ_ONLY | DONT_ENUM);
+
// Set up non-enumerable properties of the JSON object.
InstallFunctions($JSON, DONT_ENUM, $Array(
"parse", JSONParse,
diff --git a/src/list-inl.h b/src/list-inl.h
index 60e8fab..9b122fd 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -7,6 +7,7 @@
#include "src/list.h"
+#include "src/base/macros.h"
#include "src/base/platform/platform.h"
namespace v8 {
@@ -33,8 +34,10 @@
void List<T, P>::AddAll(const Vector<T>& other, P alloc) {
int result_length = length_ + other.length();
if (capacity_ < result_length) Resize(result_length, alloc);
- for (int i = 0; i < other.length(); i++) {
- data_[length_ + i] = other.at(i);
+ if (base::is_fundamental<T>()) {
+ memcpy(data_ + length_, other.start(), sizeof(*data_) * other.length());
+ } else {
+ for (int i = 0; i < other.length(); i++) data_[length_ + i] = other.at(i);
}
length_ = result_length;
}
diff --git a/src/math.js b/src/math.js
index 13d030c..860b62f 100644
--- a/src/math.js
+++ b/src/math.js
@@ -321,6 +321,8 @@
%AddNamedProperty(global, "Math", $Math, DONT_ENUM);
%FunctionSetInstanceClassName(MathConstructor, 'Math');
+ %AddNamedProperty($Math, symbolToStringTag, "Math", READ_ONLY | DONT_ENUM);
+
// Set up math constants.
InstallConstants($Math, $Array(
// ECMA-262, section 15.8.1.1.
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index deafb7c..6760b3c 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1121,6 +1121,7 @@
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(a0, v0);
__ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(a0);
// Check for proxies.
@@ -1145,6 +1146,7 @@
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1683,6 +1685,7 @@
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in v0.
diff --git a/src/mips64/full-codegen-mips64.cc b/src/mips64/full-codegen-mips64.cc
index 959edc0..0aae037 100644
--- a/src/mips64/full-codegen-mips64.cc
+++ b/src/mips64/full-codegen-mips64.cc
@@ -1116,6 +1116,7 @@
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(a0, v0);
__ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(a0);
// Check for proxies.
@@ -1140,6 +1141,7 @@
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1680,6 +1682,7 @@
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in v0.
@@ -2520,10 +2523,10 @@
Register scratch = a2;
Register scratch2 = a3;
__ mov(scratch, result_register()); // home_object
- __ lw(v0, MemOperand(sp, kPointerSize)); // value
- __ lw(scratch2, MemOperand(sp, 0)); // this
- __ sw(scratch2, MemOperand(sp, kPointerSize)); // this
- __ sw(scratch, MemOperand(sp, 0)); // home_object
+ __ ld(v0, MemOperand(sp, kPointerSize)); // value
+ __ ld(scratch2, MemOperand(sp, 0)); // this
+ __ sd(scratch2, MemOperand(sp, kPointerSize)); // this
+ __ sd(scratch, MemOperand(sp, 0)); // home_object
// stack: this, home_object; v0: value
EmitNamedSuperPropertyStore(prop);
break;
@@ -2536,13 +2539,13 @@
VisitForAccumulatorValue(prop->key());
Register scratch = a2;
Register scratch2 = a3;
- __ lw(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
+ __ ld(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
// stack: value, this, home_object; v0: key, a3: value
- __ lw(scratch, MemOperand(sp, kPointerSize)); // this
- __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
- __ lw(scratch, MemOperand(sp, 0)); // home_object
- __ sw(scratch, MemOperand(sp, kPointerSize));
- __ sw(v0, MemOperand(sp, 0));
+ __ ld(scratch, MemOperand(sp, kPointerSize)); // this
+ __ sd(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ ld(scratch, MemOperand(sp, 0)); // home_object
+ __ sd(scratch, MemOperand(sp, kPointerSize));
+ __ sd(v0, MemOperand(sp, 0));
__ Move(v0, scratch2);
// stack: this, home_object, key; v0: value.
EmitKeyedSuperPropertyStore(prop);
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index d9a8676..75e2ed1 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -395,6 +395,9 @@
HeapObject::PrintHeader(os, "Symbol");
os << " - hash: " << Hash();
os << "\n - name: " << Brief(name());
+ if (name()->IsUndefined()) {
+ os << " (" << PrivateSymbolToName() << ")";
+ }
os << "\n - private: " << is_private();
os << "\n - own: " << is_own();
os << "\n";
diff --git a/src/objects.cc b/src/objects.cc
index c09f801..81eec5e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1532,15 +1532,7 @@
}
case SYMBOL_TYPE: {
Symbol* symbol = Symbol::cast(this);
- os << "<Symbol: " << symbol->Hash();
- if (!symbol->name()->IsUndefined()) {
- os << " ";
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- String::cast(symbol->name())->StringShortPrint(&accumulator);
- os << accumulator.ToCString().get();
- }
- os << ">";
+ symbol->SymbolShortPrint(os);
break;
}
case HEAP_NUMBER_TYPE: {
@@ -13616,6 +13608,31 @@
}
+const char* Symbol::PrivateSymbolToName() const {
+ Heap* heap = GetIsolate()->heap();
+#define SYMBOL_CHECK_AND_PRINT(name) \
+ if (this == heap->name()) return #name;
+ PRIVATE_SYMBOL_LIST(SYMBOL_CHECK_AND_PRINT)
+#undef SYMBOL_CHECK_AND_PRINT
+ return "UNKNOWN";
+}
+
+
+void Symbol::SymbolShortPrint(std::ostream& os) {
+ os << "<Symbol: " << Hash();
+ if (!name()->IsUndefined()) {
+ os << " ";
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ String::cast(name())->StringShortPrint(&accumulator);
+ os << accumulator.ToCString().get();
+ } else {
+ os << " (" << PrivateSymbolToName() << ")";
+ }
+ os << ">";
+}
+
+
// StringSharedKeys are used as keys in the eval cache.
class StringSharedKey : public HashTableKey {
public:
@@ -15278,7 +15295,7 @@
if (details.IsDeleted()) continue;
if (details.type() == CALLBACKS) return true;
PropertyAttributes attr = details.attributes();
- if (attr & (READ_ONLY | DONT_DELETE)) return true;
+ if (attr & (READ_ONLY | DONT_DELETE | DONT_ENUM)) return true;
}
}
return false;
diff --git a/src/objects.h b/src/objects.h
index 5969ae6..63c8d99 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3551,7 +3551,7 @@
int NumberOfEnumElements();
// Returns true if the dictionary contains any elements that are non-writable,
- // non-configurable, or have getters/setters.
+ // non-configurable, non-enumerable, or have getters/setters.
bool HasComplexElements();
enum SortMode { UNSORTED, SORTED };
@@ -8654,10 +8654,14 @@
typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
+ void SymbolShortPrint(std::ostream& os);
+
private:
static const int kPrivateBit = 0;
static const int kOwnBit = 1;
+ const char* PrivateSymbolToName() const;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
};
diff --git a/src/preparser.cc b/src/preparser.cc
index 316f129..2978cdd 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -59,10 +59,10 @@
if (scanner->UnescapedLiteralMatches("arguments", 9)) {
return PreParserIdentifier::Arguments();
}
- if (scanner->UnescapedLiteralMatches("prototype", 9)) {
+ if (scanner->LiteralMatches("prototype", 9)) {
return PreParserIdentifier::Prototype();
}
- if (scanner->UnescapedLiteralMatches("constructor", 11)) {
+ if (scanner->LiteralMatches("constructor", 11)) {
return PreParserIdentifier::Constructor();
}
return PreParserIdentifier::Default();
diff --git a/src/runtime/runtime-function.cc b/src/runtime/runtime-function.cc
index b57064f..e25b659 100644
--- a/src/runtime/runtime-function.cc
+++ b/src/runtime/runtime-function.cc
@@ -67,13 +67,32 @@
}
+static Handle<String> NameToFunctionName(Handle<Name> name) {
+ Handle<String> stringName(name->GetHeap()->empty_string());
+
+ // TODO(caitp): Follow proper rules in section 9.2.11 (SetFunctionName)
+ if (name->IsSymbol()) {
+ Handle<Object> description(Handle<Symbol>::cast(name)->name(),
+ name->GetIsolate());
+ if (description->IsString()) {
+ stringName = Handle<String>::cast(description);
+ }
+ } else {
+ stringName = Handle<String>::cast(name);
+ }
+
+ return stringName;
+}
+
+
RUNTIME_FUNCTION(Runtime_FunctionSetName) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
- f->shared()->set_name(name);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+
+ f->shared()->set_name(*NameToFunctionName(name));
return isolate->heap()->undefined_value();
}
diff --git a/src/scanner.h b/src/scanner.h
index 7f35e71..387d331 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -394,16 +394,20 @@
const AstRawString* NextSymbol(AstValueFactory* ast_value_factory);
double DoubleValue();
- bool UnescapedLiteralMatches(const char* data, int length) {
+ bool LiteralMatches(const char* data, int length, bool allow_escapes = true) {
if (is_literal_one_byte() &&
literal_length() == length &&
- !literal_contains_escapes()) {
+ (allow_escapes || !literal_contains_escapes())) {
const char* token =
reinterpret_cast<const char*>(literal_one_byte_string().start());
return !strncmp(token, data, length);
}
return false;
}
+ inline bool UnescapedLiteralMatches(const char* data, int length) {
+ return LiteralMatches(data, length, false);
+ }
+
void IsGetOrSet(bool* is_get, bool* is_set) {
if (is_literal_one_byte() &&
literal_length() == 3 &&
diff --git a/src/serialize.cc b/src/serialize.cc
index ba4bf41..28838ed 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1677,11 +1677,10 @@
while (current < end && !(*current)->IsSmi()) {
HeapObject* current_contents = HeapObject::cast(*current);
int root_index = serializer_->root_index_map()->Lookup(current_contents);
- // Repeats are not subject to the write barrier so there are only some
- // objects that can be used in a repeat encoding. These are the early
- // ones in the root array that are never in new space.
+ // Repeats are not subject to the write barrier so we can only use
+ // immortal immovable root members. They are never in new space.
if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
- root_index < kRootArrayNumberOfConstantEncodings &&
+ Heap::RootIsImmortalImmovable(root_index) &&
current_contents == current[-1]) {
DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
int repeat_count = 1;
@@ -1908,7 +1907,7 @@
// Large objects are allocated one-by-one when deserializing. We do not
// have to keep track of multiple chunks.
pending_chunk_[LO_SPACE] += size;
- return BackReference(LO_SPACE, 0, seen_large_objects_index_++);
+ return BackReference::LargeObjectReference(seen_large_objects_index_++);
}
@@ -1925,7 +1924,8 @@
}
uint32_t offset = pending_chunk_[space];
pending_chunk_[space] = new_chunk_size;
- return BackReference(space, completed_chunks_[space].length(), offset);
+ return BackReference::Reference(space, completed_chunks_[space].length(),
+ offset);
}
@@ -2007,12 +2007,17 @@
BackReference back_reference = back_reference_map_.Lookup(obj);
if (back_reference.is_valid()) {
- if (FLAG_trace_code_serializer) {
- PrintF(" Encoding back reference to: ");
- obj->ShortPrint();
- PrintF("\n");
+ if (back_reference.is_source()) {
+ DCHECK_EQ(source_, obj);
+ SerializeSourceObject(how_to_code, where_to_point);
+ } else {
+ if (FLAG_trace_code_serializer) {
+ PrintF(" Encoding back reference to: ");
+ obj->ShortPrint();
+ PrintF("\n");
+ }
+ SerializeBackReference(back_reference, how_to_code, where_to_point, skip);
}
- SerializeBackReference(back_reference, how_to_code, where_to_point, skip);
return;
}
@@ -2056,11 +2061,6 @@
UNREACHABLE();
}
- if (obj == source_) {
- SerializeSourceObject(how_to_code, where_to_point);
- return;
- }
-
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
diff --git a/src/serialize.h b/src/serialize.h
index 3766801..ae64462 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -189,16 +189,26 @@
public:
explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
- BackReference(AllocationSpace space, uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- bitfield_ = SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
- ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits);
- }
-
BackReference() : bitfield_(kInvalidValue) {}
+ static BackReference SourceReference() { return BackReference(kSourceValue); }
+
+ static BackReference LargeObjectReference(uint32_t index) {
+ return BackReference(SpaceBits::encode(LO_SPACE) |
+ ChunkOffsetBits::encode(index));
+ }
+
+ static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ DCHECK(IsAligned(chunk_offset, kObjectAlignment));
+ DCHECK_NE(LO_SPACE, space);
+ return BackReference(
+ SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
+ ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
+ }
+
bool is_valid() const { return bitfield_ != kInvalidValue; }
+ bool is_source() const { return bitfield_ == kSourceValue; }
AllocationSpace space() const {
DCHECK(is_valid());
@@ -224,6 +234,7 @@
private:
static const uint32_t kInvalidValue = 0xFFFFFFFF;
+ static const uint32_t kSourceValue = 0xFFFFFFFE;
static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
@@ -263,6 +274,10 @@
SetValue(entry, b.bitfield());
}
+ void AddSourceString(String* string) {
+ Add(string, BackReference::SourceReference());
+ }
+
private:
DisallowHeapAllocation no_allocation_;
HashMap* map_;
@@ -700,7 +715,9 @@
: Serializer(isolate, sink),
source_(source),
main_code_(main_code),
- num_internalized_strings_(0) {}
+ num_internalized_strings_(0) {
+ back_reference_map_.AddSourceString(source);
+ }
virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
diff --git a/src/string-iterator.js b/src/string-iterator.js
index cb578e7..dcaddaf 100644
--- a/src/string-iterator.js
+++ b/src/string-iterator.js
@@ -87,6 +87,8 @@
%FunctionSetName(StringIteratorIterator, '[Symbol.iterator]');
%AddNamedProperty(StringIterator.prototype, symbolIterator,
StringIteratorIterator, DONT_ENUM);
+ %AddNamedProperty(StringIterator.prototype, symbolToStringTag,
+ "String Iterator", READ_ONLY | DONT_ENUM);
}
SetUpStringIterator();
diff --git a/src/typedarray.js b/src/typedarray.js
index c149b35..4420bce 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -291,6 +291,13 @@
}
}
+function TypedArrayGetToStringTag() {
+ if (!%IsTypedArray(this)) return;
+ var name = %_ClassOf(this);
+ if (IS_UNDEFINED(name)) return;
+ return name;
+}
+
// -------------------------------------------------------------------
function SetupTypedArrays() {
@@ -310,7 +317,8 @@
InstallGetter(global.NAME.prototype, "byteOffset", NAME_GetByteOffset);
InstallGetter(global.NAME.prototype, "byteLength", NAME_GetByteLength);
InstallGetter(global.NAME.prototype, "length", NAME_GetLength);
-
+ InstallGetter(global.NAME.prototype, symbolToStringTag,
+ TypedArrayGetToStringTag);
InstallFunctions(global.NAME.prototype, DONT_ENUM, $Array(
"subarray", NAMESubArray,
"set", TypedArraySet
@@ -437,6 +445,8 @@
// Set up constructor property on the DataView prototype.
%AddNamedProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM);
+ %AddNamedProperty(
+ $DataView.prototype, symbolToStringTag, "DataView", READ_ONLY|DONT_ENUM);
InstallGetter($DataView.prototype, "buffer", DataViewGetBufferJS);
InstallGetter($DataView.prototype, "byteOffset", DataViewGetByteOffset);
diff --git a/src/version.cc b/src/version.cc
index 7dda112..bb2734c 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 30
-#define BUILD_NUMBER 18
+#define BUILD_NUMBER 19
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 4cbdecb..4229f44 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1086,6 +1086,7 @@
__ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(rax);
// Check for proxies.
@@ -1110,6 +1111,7 @@
__ bind(&call_runtime);
__ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1663,6 +1665,7 @@
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in rax.
diff --git a/src/x87/full-codegen-x87.cc b/src/x87/full-codegen-x87.cc
index c2575df..9240fc8 100644
--- a/src/x87/full-codegen-x87.cc
+++ b/src/x87/full-codegen-x87.cc
@@ -1051,6 +1051,7 @@
__ push(eax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(eax);
// Check for proxies.
@@ -1072,6 +1073,7 @@
__ bind(&call_runtime);
__ push(eax);
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
__ j(not_equal, &fixed_array);
@@ -1618,6 +1620,7 @@
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in eax.
diff --git a/test/cctest/compiler/test-schedule.cc b/test/cctest/compiler/test-schedule.cc
index 63e7c2c..da52a66 100644
--- a/test/cctest/compiler/test-schedule.cc
+++ b/test/cctest/compiler/test-schedule.cc
@@ -30,12 +30,11 @@
TEST(TestScheduleAddNode) {
HandleAndZoneScope scope;
+ Schedule schedule(scope.main_zone());
Graph graph(scope.main_zone());
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator);
- Schedule schedule(scope.main_zone());
-
BasicBlock* entry = schedule.start();
schedule.AddNode(entry, n0);
schedule.AddNode(entry, n1);
@@ -51,8 +50,8 @@
TEST(TestScheduleAddGoto) {
HandleAndZoneScope scope;
-
Schedule schedule(scope.main_zone());
+
BasicBlock* entry = schedule.start();
BasicBlock* next = schedule.NewBasicBlock();
@@ -71,16 +70,15 @@
TEST(TestScheduleAddBranch) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
-
- BasicBlock* entry = schedule.start();
- BasicBlock* tblock = schedule.NewBasicBlock();
- BasicBlock* fblock = schedule.NewBasicBlock();
-
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
Node* n0 = graph.NewNode(&dummy_operator);
Node* b = graph.NewNode(common.Branch(), n0);
+ BasicBlock* entry = schedule.start();
+ BasicBlock* tblock = schedule.NewBasicBlock();
+ BasicBlock* fblock = schedule.NewBasicBlock();
+
schedule.AddBranch(entry, b, tblock, fblock);
CHECK_EQ(0, static_cast<int>(entry->PredecessorCount()));
@@ -126,6 +124,40 @@
}
+TEST(TestScheduleInsertBranch) {
+ HandleAndZoneScope scope;
+ Schedule schedule(scope.main_zone());
+ Graph graph(scope.main_zone());
+ CommonOperatorBuilder common(scope.main_zone());
+ Node* n0 = graph.NewNode(&dummy_operator);
+ Node* n1 = graph.NewNode(&dummy_operator);
+ Node* b = graph.NewNode(common.Branch(), n1);
+
+ BasicBlock* entry = schedule.start();
+ BasicBlock* tblock = schedule.NewBasicBlock();
+ BasicBlock* fblock = schedule.NewBasicBlock();
+ BasicBlock* merge = schedule.NewBasicBlock();
+ schedule.AddReturn(entry, n0);
+ schedule.AddGoto(tblock, merge);
+ schedule.AddGoto(fblock, merge);
+
+ schedule.InsertBranch(entry, merge, b, tblock, fblock);
+
+ CHECK_EQ(0, static_cast<int>(entry->PredecessorCount()));
+ CHECK_EQ(2, static_cast<int>(entry->SuccessorCount()));
+ CHECK_EQ(tblock, entry->SuccessorAt(0));
+ CHECK_EQ(fblock, entry->SuccessorAt(1));
+
+ CHECK_EQ(2, static_cast<int>(merge->PredecessorCount()));
+ CHECK_EQ(1, static_cast<int>(merge->SuccessorCount()));
+ CHECK_EQ(schedule.end(), merge->SuccessorAt(0));
+
+ CHECK_EQ(1, static_cast<int>(schedule.end()->PredecessorCount()));
+ CHECK_EQ(0, static_cast<int>(schedule.end()->SuccessorCount()));
+ CHECK_EQ(merge, schedule.end()->PredecessorAt(0));
+}
+
+
TEST(BuildMulNodeGraph) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
diff --git a/test/cctest/compiler/test-scheduler.cc b/test/cctest/compiler/test-scheduler.cc
index 894f0f0..e866876 100644
--- a/test/cctest/compiler/test-scheduler.cc
+++ b/test/cctest/compiler/test-scheduler.cc
@@ -24,39 +24,21 @@
using namespace v8::internal::compiler;
// TODO(titzer): pull RPO tests out to their own file.
-struct TestLoop {
- int count;
- BasicBlock** nodes;
- BasicBlock* header() { return nodes[0]; }
- BasicBlock* last() { return nodes[count - 1]; }
- ~TestLoop() { delete[] nodes; }
-};
-
-
-static TestLoop* CreateLoop(Schedule* schedule, int count) {
- TestLoop* loop = new TestLoop();
- loop->count = count;
- loop->nodes = new BasicBlock* [count];
- for (int i = 0; i < count; i++) {
- loop->nodes[i] = schedule->NewBasicBlock();
- if (i > 0) schedule->AddSuccessor(loop->nodes[i - 1], loop->nodes[i]);
- }
- schedule->AddSuccessor(loop->nodes[count - 1], loop->nodes[0]);
- return loop;
-}
-
-
static void CheckRPONumbers(BasicBlockVector* order, size_t expected,
bool loops_allowed) {
CHECK(expected == order->size());
for (int i = 0; i < static_cast<int>(order->size()); i++) {
CHECK(order->at(i)->rpo_number() == i);
- if (!loops_allowed) CHECK_LT(order->at(i)->loop_end(), 0);
+ if (!loops_allowed) {
+ CHECK_LT(order->at(i)->loop_end(), 0);
+ CHECK_EQ(NULL, order->at(i)->loop_header());
+ }
}
}
-static void CheckLoopContains(BasicBlock** blocks, int body_size) {
+static void CheckLoop(BasicBlockVector* order, BasicBlock** blocks,
+ int body_size) {
BasicBlock* header = blocks[0];
CHECK_GT(header->loop_end(), 0);
CHECK_EQ(body_size, (header->loop_end() - header->rpo_number()));
@@ -66,6 +48,38 @@
CHECK(header->LoopContains(blocks[i]));
CHECK(header->IsLoopHeader() || blocks[i]->loop_header() == header);
}
+ if (header->rpo_number() > 0) {
+ CHECK_NE(order->at(header->rpo_number() - 1)->loop_header(), header);
+ }
+ if (header->loop_end() < static_cast<int>(order->size())) {
+ CHECK_NE(order->at(header->loop_end())->loop_header(), header);
+ }
+}
+
+
+struct TestLoop {
+ int count;
+ BasicBlock** nodes;
+ BasicBlock* header() { return nodes[0]; }
+ BasicBlock* last() { return nodes[count - 1]; }
+ ~TestLoop() { delete[] nodes; }
+
+ void Check(BasicBlockVector* order) { CheckLoop(order, nodes, count); }
+};
+
+
+static TestLoop* CreateLoop(Schedule* schedule, int count) {
+ TestLoop* loop = new TestLoop();
+ loop->count = count;
+ loop->nodes = new BasicBlock* [count];
+ for (int i = 0; i < count; i++) {
+ loop->nodes[i] = schedule->NewBasicBlock();
+ if (i > 0) {
+ schedule->AddSuccessorForTesting(loop->nodes[i - 1], loop->nodes[i]);
+ }
+ }
+ schedule->AddSuccessorForTesting(loop->nodes[count - 1], loop->nodes[0]);
+ return loop;
}
@@ -160,25 +174,25 @@
TEST(RPOSelfLoop) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- schedule.AddSuccessor(schedule.start(), schedule.start());
+ schedule.AddSuccessorForTesting(schedule.start(), schedule.start());
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 1, true);
BasicBlock* loop[] = {schedule.start()};
- CheckLoopContains(loop, 1);
+ CheckLoop(order, loop, 1);
}
TEST(RPOEntryLoop) {
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
- schedule.AddSuccessor(schedule.start(), schedule.end());
- schedule.AddSuccessor(schedule.end(), schedule.start());
+ schedule.AddSuccessorForTesting(schedule.start(), schedule.end());
+ schedule.AddSuccessorForTesting(schedule.end(), schedule.start());
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 2, true);
BasicBlock* loop[] = {schedule.start(), schedule.end()};
- CheckLoopContains(loop, 2);
+ CheckLoop(order, loop, 2);
}
@@ -186,11 +200,11 @@
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
- schedule.AddSuccessor(schedule.start(), loop1->header());
+ schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 3, true);
- CheckLoopContains(loop1->nodes, loop1->count);
+ loop1->Check(order);
}
@@ -198,12 +212,12 @@
HandleAndZoneScope scope;
Schedule schedule(scope.main_zone());
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
- schedule.AddSuccessor(schedule.start(), loop1->header());
- schedule.AddSuccessor(loop1->last(), schedule.start());
+ schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 3, true);
- CheckLoopContains(loop1->nodes, loop1->count);
+ loop1->Check(order);
}
@@ -216,10 +230,10 @@
BasicBlock* C = schedule.NewBasicBlock();
BasicBlock* D = schedule.end();
- schedule.AddSuccessor(A, B);
- schedule.AddSuccessor(A, C);
- schedule.AddSuccessor(B, D);
- schedule.AddSuccessor(C, D);
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(A, C);
+ schedule.AddSuccessorForTesting(B, D);
+ schedule.AddSuccessorForTesting(C, D);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
@@ -241,16 +255,16 @@
BasicBlock* C = schedule.NewBasicBlock();
BasicBlock* D = schedule.end();
- schedule.AddSuccessor(A, B);
- schedule.AddSuccessor(B, C);
- schedule.AddSuccessor(C, B);
- schedule.AddSuccessor(C, D);
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(C, D);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 4, true);
BasicBlock* loop[] = {B, C};
- CheckLoopContains(loop, 2);
+ CheckLoop(order, loop, 2);
}
@@ -263,16 +277,16 @@
BasicBlock* C = schedule.NewBasicBlock();
BasicBlock* D = schedule.end();
- schedule.AddSuccessor(A, B);
- schedule.AddSuccessor(B, C);
- schedule.AddSuccessor(C, B);
- schedule.AddSuccessor(B, D);
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(B, D);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 4, true);
BasicBlock* loop[] = {B, C};
- CheckLoopContains(loop, 2);
+ CheckLoop(order, loop, 2);
}
@@ -289,34 +303,34 @@
BasicBlock* F = schedule.NewBasicBlock();
BasicBlock* G = schedule.end();
- schedule.AddSuccessor(A, B);
- schedule.AddSuccessor(B, C);
- schedule.AddSuccessor(C, D);
- schedule.AddSuccessor(D, E);
- schedule.AddSuccessor(E, F);
- schedule.AddSuccessor(F, B);
- schedule.AddSuccessor(B, G);
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, F);
+ schedule.AddSuccessorForTesting(F, B);
+ schedule.AddSuccessorForTesting(B, G);
// Throw in extra backedges from time to time.
- if (i == 1) schedule.AddSuccessor(B, B);
- if (i == 2) schedule.AddSuccessor(C, B);
- if (i == 3) schedule.AddSuccessor(D, B);
- if (i == 4) schedule.AddSuccessor(E, B);
- if (i == 5) schedule.AddSuccessor(F, B);
+ if (i == 1) schedule.AddSuccessorForTesting(B, B);
+ if (i == 2) schedule.AddSuccessorForTesting(C, B);
+ if (i == 3) schedule.AddSuccessorForTesting(D, B);
+ if (i == 4) schedule.AddSuccessorForTesting(E, B);
+ if (i == 5) schedule.AddSuccessorForTesting(F, B);
// Throw in extra loop exits from time to time.
- if (i == 6) schedule.AddSuccessor(B, G);
- if (i == 7) schedule.AddSuccessor(C, G);
- if (i == 8) schedule.AddSuccessor(D, G);
- if (i == 9) schedule.AddSuccessor(E, G);
- if (i == 10) schedule.AddSuccessor(F, G);
+ if (i == 6) schedule.AddSuccessorForTesting(B, G);
+ if (i == 7) schedule.AddSuccessorForTesting(C, G);
+ if (i == 8) schedule.AddSuccessorForTesting(D, G);
+ if (i == 9) schedule.AddSuccessorForTesting(E, G);
+ if (i == 10) schedule.AddSuccessorForTesting(F, G);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order =
Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 7, true);
BasicBlock* loop[] = {B, C, D, E, F};
- CheckLoopContains(loop, 5);
+ CheckLoop(order, loop, 5);
}
}
@@ -332,22 +346,22 @@
BasicBlock* E = schedule.NewBasicBlock();
BasicBlock* F = schedule.end();
- schedule.AddSuccessor(A, B);
- schedule.AddSuccessor(B, C);
- schedule.AddSuccessor(C, D);
- schedule.AddSuccessor(D, C);
- schedule.AddSuccessor(D, E);
- schedule.AddSuccessor(E, B);
- schedule.AddSuccessor(E, F);
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, C);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, B);
+ schedule.AddSuccessorForTesting(E, F);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 6, true);
BasicBlock* loop1[] = {B, C, D, E};
- CheckLoopContains(loop1, 4);
+ CheckLoop(order, loop1, 4);
BasicBlock* loop2[] = {C, D};
- CheckLoopContains(loop2, 2);
+ CheckLoop(order, loop2, 2);
}
@@ -364,29 +378,29 @@
BasicBlock* G = schedule.NewBasicBlock();
BasicBlock* H = schedule.end();
- schedule.AddSuccessor(A, B);
- schedule.AddSuccessor(B, C);
- schedule.AddSuccessor(C, D);
- schedule.AddSuccessor(D, E);
- schedule.AddSuccessor(E, F);
- schedule.AddSuccessor(F, G);
- schedule.AddSuccessor(G, H);
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, F);
+ schedule.AddSuccessorForTesting(F, G);
+ schedule.AddSuccessorForTesting(G, H);
- schedule.AddSuccessor(E, D);
- schedule.AddSuccessor(F, C);
- schedule.AddSuccessor(G, B);
+ schedule.AddSuccessorForTesting(E, D);
+ schedule.AddSuccessorForTesting(F, C);
+ schedule.AddSuccessorForTesting(G, B);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 8, true);
BasicBlock* loop1[] = {B, C, D, E, F, G};
- CheckLoopContains(loop1, 6);
+ CheckLoop(order, loop1, 6);
BasicBlock* loop2[] = {C, D, E, F};
- CheckLoopContains(loop2, 4);
+ CheckLoop(order, loop2, 4);
BasicBlock* loop3[] = {D, E};
- CheckLoopContains(loop3, 2);
+ CheckLoop(order, loop3, 2);
}
@@ -400,19 +414,18 @@
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- schedule.AddSuccessor(A, loop1->header());
- schedule.AddSuccessor(loop1->header(), loop2->header());
- schedule.AddSuccessor(loop2->last(), E);
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), E);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
- CheckLoopContains(loop1->nodes, loop1->count);
-
CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
static_cast<int>(order->size()));
- CheckLoopContains(loop1->nodes, loop1->count);
- CheckLoopContains(loop2->nodes, loop2->count);
+
+ loop1->Check(order);
+ loop2->Check(order);
}
@@ -427,20 +440,18 @@
BasicBlock* S = schedule.NewBasicBlock();
BasicBlock* E = schedule.end();
- schedule.AddSuccessor(A, loop1->header());
- schedule.AddSuccessor(loop1->header(), S);
- schedule.AddSuccessor(S, loop2->header());
- schedule.AddSuccessor(loop2->last(), E);
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), S);
+ schedule.AddSuccessorForTesting(S, loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), E);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
- CheckLoopContains(loop1->nodes, loop1->count);
-
CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
static_cast<int>(order->size()));
- CheckLoopContains(loop1->nodes, loop1->count);
- CheckLoopContains(loop2->nodes, loop2->count);
+ loop1->Check(order);
+ loop2->Check(order);
}
@@ -455,18 +466,17 @@
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- schedule.AddSuccessor(A, loop1->header());
- schedule.AddSuccessor(loop1->nodes[exit], loop2->header());
- schedule.AddSuccessor(loop2->nodes[exit], E);
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[exit], loop2->header());
+ schedule.AddSuccessorForTesting(loop2->nodes[exit], E);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order =
Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
- CheckLoopContains(loop1->nodes, loop1->count);
CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
static_cast<int>(order->size()));
- CheckLoopContains(loop1->nodes, loop1->count);
- CheckLoopContains(loop2->nodes, loop2->count);
+ loop1->Check(order);
+ loop2->Check(order);
}
}
}
@@ -484,25 +494,23 @@
BasicBlock* C = schedule.NewBasicBlock();
BasicBlock* E = schedule.end();
- schedule.AddSuccessor(A, B);
- schedule.AddSuccessor(B, loop1->header());
- schedule.AddSuccessor(loop1->header(), loop2->header());
- schedule.AddSuccessor(loop2->last(), C);
- schedule.AddSuccessor(C, E);
- schedule.AddSuccessor(C, B);
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), C);
+ schedule.AddSuccessorForTesting(C, E);
+ schedule.AddSuccessorForTesting(C, B);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
- CheckLoopContains(loop1->nodes, loop1->count);
-
CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
static_cast<int>(order->size()));
- CheckLoopContains(loop1->nodes, loop1->count);
- CheckLoopContains(loop2->nodes, loop2->count);
+ loop1->Check(order);
+ loop2->Check(order);
BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
- CheckLoopContains(loop3, 4);
+ CheckLoop(order, loop3, 4);
}
@@ -517,17 +525,17 @@
BasicBlock* E = schedule.end();
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessor(A, loop1->header());
- schedule.AddSuccessor(loop1->last(), E);
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
- schedule.AddSuccessor(loop1->nodes[i], loop1->header());
- schedule.AddSuccessor(loop1->nodes[j], E);
+ schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[j], E);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order =
Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- CheckLoopContains(loop1->nodes, loop1->count);
+ loop1->Check(order);
}
}
}
@@ -545,18 +553,18 @@
BasicBlock* E = schedule.end();
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessor(A, loop1->header());
- schedule.AddSuccessor(loop1->last(), E);
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
- schedule.AddSuccessor(loop1->nodes[i], loop1->header());
- schedule.AddSuccessor(loop1->nodes[j], D);
- schedule.AddSuccessor(D, E);
+ schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[j], D);
+ schedule.AddSuccessorForTesting(D, E);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order =
Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- CheckLoopContains(loop1->nodes, loop1->count);
+ loop1->Check(order);
}
}
}
@@ -572,20 +580,20 @@
BasicBlock* E = schedule.end();
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessor(A, loop1->header());
- schedule.AddSuccessor(loop1->last(), E);
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
for (int j = 0; j < size; j++) {
BasicBlock* O = schedule.NewBasicBlock();
- schedule.AddSuccessor(loop1->nodes[j], O);
- schedule.AddSuccessor(O, E);
+ schedule.AddSuccessorForTesting(loop1->nodes[j], O);
+ schedule.AddSuccessorForTesting(O, E);
}
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order =
Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- CheckLoopContains(loop1->nodes, loop1->count);
+ loop1->Check(order);
}
}
@@ -599,24 +607,24 @@
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessor(A, loop1->header());
- schedule.AddSuccessor(loop1->last(), E);
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
TestLoop** loopN = new TestLoop* [size];
for (int j = 0; j < size; j++) {
loopN[j] = CreateLoop(&schedule, 2);
- schedule.AddSuccessor(loop1->nodes[j], loopN[j]->header());
- schedule.AddSuccessor(loopN[j]->last(), E);
+ schedule.AddSuccessorForTesting(loop1->nodes[j], loopN[j]->header());
+ schedule.AddSuccessorForTesting(loopN[j]->last(), E);
}
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order =
Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- CheckLoopContains(loop1->nodes, loop1->count);
+ loop1->Check(order);
for (int j = 0; j < size; j++) {
- CheckLoopContains(loopN[j]->nodes, loopN[j]->count);
+ loopN[j]->Check(order);
delete loopN[j];
}
delete[] loopN;
@@ -634,20 +642,20 @@
BasicBlock* D = schedule.end();
BasicBlock* E = schedule.NewBasicBlock();
- schedule.AddSuccessor(A, B);
- schedule.AddSuccessor(B, C);
- schedule.AddSuccessor(B, D);
- schedule.AddSuccessor(B, E);
- schedule.AddSuccessor(C, B);
- schedule.AddSuccessor(D, B);
- schedule.AddSuccessor(E, B);
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(B, D);
+ schedule.AddSuccessorForTesting(B, E);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(D, B);
+ schedule.AddSuccessorForTesting(E, B);
ZonePool zone_pool(scope.main_isolate());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
CheckRPONumbers(order, 5, true);
BasicBlock* loop1[] = {B, C, D, E};
- CheckLoopContains(loop1, 4);
+ CheckLoop(order, loop1, 4);
}
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 8c1a4d9..9079d5a 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -1442,21 +1442,19 @@
CHECK_EQ(expected_, t.result);
-TEST(18) {
+TEST(sdiv) {
// Test the sdiv.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
-
- typedef struct {
- uint32_t dividend;
- uint32_t divisor;
- uint32_t result;
- } T;
- T t;
-
Assembler assm(isolate, NULL, 0);
+ struct T {
+ int32_t dividend;
+ int32_t divisor;
+ int32_t result;
+ } t;
+
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(&assm, SUDIV);
@@ -1480,6 +1478,8 @@
#endif
F3 f = FUNCTION_CAST<F3>(code->entry());
Object* dummy;
+ TEST_SDIV(0, kMinInt, 0);
+ TEST_SDIV(0, 1024, 0);
TEST_SDIV(1073741824, kMinInt, -2);
TEST_SDIV(kMinInt, kMinInt, -1);
TEST_SDIV(5, 10, 2);
@@ -1498,6 +1498,62 @@
#undef TEST_SDIV
+#define TEST_UDIV(expected_, dividend_, divisor_) \
+ t.dividend = dividend_; \
+ t.divisor = divisor_; \
+ t.result = 0; \
+ dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); \
+ CHECK_EQ(expected_, t.result);
+
+
+TEST(udiv) {
+ // Test the udiv.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, NULL, 0);
+
+ struct T {
+ uint32_t dividend;
+ uint32_t divisor;
+ uint32_t result;
+ } t;
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(&assm, SUDIV);
+
+ __ mov(r3, Operand(r0));
+
+ __ ldr(r0, MemOperand(r3, OFFSET_OF(T, dividend)));
+ __ ldr(r1, MemOperand(r3, OFFSET_OF(T, divisor)));
+
+ __ sdiv(r2, r0, r1);
+ __ str(r2, MemOperand(r3, OFFSET_OF(T, result)));
+
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ Object* dummy;
+ TEST_UDIV(0, 0, 0);
+ TEST_UDIV(0, 1024, 0);
+ TEST_UDIV(5, 10, 2);
+ TEST_UDIV(3, 10, 3);
+ USE(dummy);
+ }
+}
+
+
+#undef TEST_UDIV
+
+
TEST(smmla) {
CcTest::InitializeVM();
Isolate* const isolate = CcTest::i_isolate();
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index 7ab4722..4028167 100644
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -3973,6 +3973,13 @@
"static get prototype() {}",
"static set prototype(_) {}",
"static *prototype() {}",
+ "static 'prototype'() {}",
+ "static *'prototype'() {}",
+ "static prot\\u006ftype() {}",
+ "static 'prot\\u006ftype'() {}",
+ "static get 'prot\\u006ftype'() {}",
+ "static set 'prot\\u006ftype'(_) {}",
+ "static *'prot\\u006ftype'() {}",
NULL};
static const ParserFlag always_flags[] = {
@@ -3993,6 +4000,13 @@
"get constructor() {}",
"get constructor(_) {}",
"*constructor() {}",
+ "get 'constructor'() {}",
+ "*'constructor'() {}",
+ "get c\\u006fnstructor() {}",
+ "*c\\u006fnstructor() {}",
+ "get 'c\\u006fnstructor'() {}",
+ "get 'c\\u006fnstructor'(_) {}",
+ "*'c\\u006fnstructor'() {}",
NULL};
static const ParserFlag always_flags[] = {
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index ac716a4..0c0e522 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -867,6 +867,8 @@
CHECK_EQ(6 * 1000000, Handle<String>::cast(copy_result)->length());
CHECK(isolate->heap()->InSpace(HeapObject::cast(*copy_result), LO_SPACE));
+ // Make sure we do not serialize too much, e.g. include the source string.
+ CHECK_LT(cache->length(), 7000000);
delete cache;
source.Dispose();
diff --git a/test/mjsunit/es6/array-iterator.js b/test/mjsunit/es6/array-iterator.js
index 96122cd..767991e 100644
--- a/test/mjsunit/es6/array-iterator.js
+++ b/test/mjsunit/es6/array-iterator.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --harmony-tostring
var NONE = 0;
@@ -158,6 +158,15 @@
Object.getOwnPropertyNames(ArrayIteratorPrototype));
assertHasOwnProperty(ArrayIteratorPrototype, 'next', DONT_ENUM);
assertHasOwnProperty(ArrayIteratorPrototype, Symbol.iterator, DONT_ENUM);
+
+ assertEquals("[object Array Iterator]",
+ Object.prototype.toString.call(iterator));
+ assertEquals("Array Iterator", ArrayIteratorPrototype[Symbol.toStringTag]);
+ var desc = Object.getOwnPropertyDescriptor(
+ ArrayIteratorPrototype, Symbol.toStringTag);
+ assertTrue(desc.configurable);
+ assertFalse(desc.writable);
+ assertEquals("Array Iterator", desc.value);
}
TestArrayIteratorPrototype();
diff --git a/test/mjsunit/es6/collection-iterator.js b/test/mjsunit/es6/collection-iterator.js
index 5503fe5..18b3f1a 100644
--- a/test/mjsunit/es6/collection-iterator.js
+++ b/test/mjsunit/es6/collection-iterator.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --harmony-tostring
(function TestSetIterator() {
@@ -19,6 +19,15 @@
assertEquals(new Set().values().__proto__, SetIteratorPrototype);
assertEquals(new Set().entries().__proto__, SetIteratorPrototype);
+
+ assertEquals("[object Set Iterator]",
+ Object.prototype.toString.call(iter));
+ assertEquals("Set Iterator", SetIteratorPrototype[Symbol.toStringTag]);
+ var desc = Object.getOwnPropertyDescriptor(
+ SetIteratorPrototype, Symbol.toStringTag);
+ assertTrue(desc.configurable);
+ assertFalse(desc.writable);
+ assertEquals("Set Iterator", desc.value);
})();
@@ -120,6 +129,15 @@
assertEquals(new Map().values().__proto__, MapIteratorPrototype);
assertEquals(new Map().keys().__proto__, MapIteratorPrototype);
assertEquals(new Map().entries().__proto__, MapIteratorPrototype);
+
+ assertEquals("[object Map Iterator]",
+ Object.prototype.toString.call(iter));
+ assertEquals("Map Iterator", MapIteratorPrototype[Symbol.toStringTag]);
+ var desc = Object.getOwnPropertyDescriptor(
+ MapIteratorPrototype, Symbol.toStringTag);
+ assertTrue(desc.configurable);
+ assertFalse(desc.writable);
+ assertEquals("Map Iterator", desc.value);
})();
diff --git a/test/mjsunit/es6/json.js b/test/mjsunit/es6/json.js
new file mode 100644
index 0000000..3fad083
--- /dev/null
+++ b/test/mjsunit/es6/json.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tostring
+
+function testJSONToString() {
+ assertEquals('[object JSON]', "" + JSON);
+ assertEquals("JSON", JSON[Symbol.toStringTag]);
+ var desc = Object.getOwnPropertyDescriptor(JSON, Symbol.toStringTag);
+ assertTrue(desc.configurable);
+ assertFalse(desc.writable);
+ assertEquals("JSON", desc.value);
+}
+testJSONToString();
diff --git a/test/mjsunit/es6/math.js b/test/mjsunit/es6/math.js
new file mode 100644
index 0000000..3f76f11
--- /dev/null
+++ b/test/mjsunit/es6/math.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tostring
+
+function testMathToString() {
+ assertEquals('[object Math]', "" + Math);
+ assertEquals("Math", Math[Symbol.toStringTag]);
+ var desc = Object.getOwnPropertyDescriptor(Math, Symbol.toStringTag);
+ assertTrue(desc.configurable);
+ assertFalse(desc.writable);
+ assertEquals("Math", desc.value);
+}
+testMathToString();
diff --git a/test/mjsunit/es6/string-iterator.js b/test/mjsunit/es6/string-iterator.js
index e6bea6d..769f549 100644
--- a/test/mjsunit/es6/string-iterator.js
+++ b/test/mjsunit/es6/string-iterator.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-tostring
function TestStringPrototypeIterator() {
assertTrue(String.prototype.hasOwnProperty(Symbol.iterator));
@@ -59,6 +60,12 @@
assertArrayEquals(['next'],
Object.getOwnPropertyNames(StringIteratorPrototype));
assertEquals('[object String Iterator]', "" + iterator);
+ assertEquals("String Iterator", StringIteratorPrototype[Symbol.toStringTag]);
+ var desc = Object.getOwnPropertyDescriptor(
+ StringIteratorPrototype, Symbol.toStringTag);
+ assertTrue(desc.configurable);
+ assertFalse(desc.writable);
+ assertEquals("String Iterator", desc.value);
}
TestStringIteratorPrototype();
diff --git a/test/mjsunit/harmony/proxies.js b/test/mjsunit/harmony/proxies.js
index b082c06..2b0ec76 100644
--- a/test/mjsunit/harmony/proxies.js
+++ b/test/mjsunit/harmony/proxies.js
@@ -29,7 +29,7 @@
// test enters an infinite recursion which goes through the runtime and we
// overflow the system stack before the simulator stack.
-// Flags: --harmony-proxies --sim-stack-size=500
+// Flags: --harmony-proxies --sim-stack-size=500 --turbo-deoptimization
// Helper.
diff --git a/test/mjsunit/harmony/typedarrays.js b/test/mjsunit/harmony/typedarrays.js
index 537fba3..a4d6e79 100644
--- a/test/mjsunit/harmony/typedarrays.js
+++ b/test/mjsunit/harmony/typedarrays.js
@@ -265,6 +265,17 @@
assertSame(0, aNoParam.length);
assertSame(0, aNoParam.byteLength);
assertSame(0, aNoParam.byteOffset);
+
+ var a = new constr(ab, 64*elementSize, 128);
+ assertEquals("[object " + constr.name + "]",
+ Object.prototype.toString.call(a));
+ var desc = Object.getOwnPropertyDescriptor(
+ constr.prototype, Symbol.toStringTag);
+ assertTrue(desc.configurable);
+ assertFalse(desc.enumerable);
+ assertFalse(!!desc.writable);
+ assertFalse(!!desc.set);
+ assertEquals("function", typeof desc.get);
}
TestTypedArray(Uint8Array, 1, 0xFF);
@@ -654,6 +665,19 @@
TestDataViewPropertyTypeChecks();
+
+function TestDataViewToStringTag() {
+ var a = new DataView(new ArrayBuffer(10));
+ assertEquals("[object DataView]", Object.prototype.toString.call(a));
+ var desc = Object.getOwnPropertyDescriptor(
+ DataView.prototype, Symbol.toStringTag);
+ assertTrue(desc.configurable);
+ assertFalse(desc.enumerable);
+ assertFalse(desc.writable);
+ assertEquals("DataView", desc.value);
+}
+
+
// General tests for properties
// Test property attribute [[Enumerable]]
diff --git a/test/mjsunit/regress/regress-3643.js b/test/mjsunit/regress/regress-3643.js
new file mode 100644
index 0000000..cc61a1c
--- /dev/null
+++ b/test/mjsunit/regress/regress-3643.js
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = [1, 2, 3];
+Object.defineProperty(a, '1', {
+ get: function() { delete this[1]; return undefined; },
+ configurable: true
+});
+var s = a.slice(1);
+assertTrue('0' in s);
+
+// Sparse case should hit the same code as above due to presence of the getter.
+a = [1, 2, 3];
+a[0xffff] = 4;
+Object.defineProperty(a, '1', {
+ get: function() { delete this[1]; return undefined; },
+ configurable: true
+});
+s = a.slice(1);
+assertTrue('0' in s);
diff --git a/test/mjsunit/regress/regress-417709a.js b/test/mjsunit/regress/regress-417709a.js
index d210c10..7c4d4f7 100644
--- a/test/mjsunit/regress/regress-417709a.js
+++ b/test/mjsunit/regress/regress-417709a.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --stack-size=100
+// Flags: --stack-size=100 --turbo-deoptimization
var a = [];
diff --git a/test/mjsunit/regress/regress-shift-enumerable.js b/test/mjsunit/regress/regress-shift-enumerable.js
new file mode 100644
index 0000000..f3ee258
--- /dev/null
+++ b/test/mjsunit/regress/regress-shift-enumerable.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var arr = [1, 2];
+Object.defineProperty(arr, 0xfffe, {
+ value: 3,
+ configurable: true,
+ writable: true,
+ enumerable: false
+});
+arr[0xffff] = 4;
+arr.shift();
+var desc = Object.getOwnPropertyDescriptor(arr, 0xfffe);
+assertEquals(4, desc.value);
+assertFalse(desc.enumerable);
diff --git a/test/unittests/base/bits-unittest.cc b/test/unittests/base/bits-unittest.cc
index be41007..9caba84 100644
--- a/test/unittests/base/bits-unittest.cc
+++ b/test/unittests/base/bits-unittest.cc
@@ -224,6 +224,58 @@
}
}
+
+TEST(Bits, SignedDiv32) {
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(),
+ SignedDiv32(std::numeric_limits<int32_t>::min(), -1));
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(),
+ SignedDiv32(std::numeric_limits<int32_t>::max(), 1));
+ TRACED_FORRANGE(int32_t, i, 0, 50) {
+ EXPECT_EQ(0, SignedDiv32(i, 0));
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_EQ(1, SignedDiv32(j, j));
+ EXPECT_EQ(i / j, SignedDiv32(i, j));
+ EXPECT_EQ(-i / j, SignedDiv32(i, -j));
+ }
+ }
+}
+
+
+TEST(Bits, SignedMod32) {
+ EXPECT_EQ(0, SignedMod32(std::numeric_limits<int32_t>::min(), -1));
+ EXPECT_EQ(0, SignedMod32(std::numeric_limits<int32_t>::max(), 1));
+ TRACED_FORRANGE(int32_t, i, 0, 50) {
+ EXPECT_EQ(0, SignedMod32(i, 0));
+ TRACED_FORRANGE(int32_t, j, 1, i) {
+ EXPECT_EQ(0, SignedMod32(j, j));
+ EXPECT_EQ(i % j, SignedMod32(i, j));
+ EXPECT_EQ(i % j, SignedMod32(i, -j));
+ }
+ }
+}
+
+
+TEST(Bits, UnsignedDiv32) {
+ TRACED_FORRANGE(uint32_t, i, 0, 50) {
+ EXPECT_EQ(0u, UnsignedDiv32(i, 0));
+ TRACED_FORRANGE(uint32_t, j, i + 1, 100) {
+ EXPECT_EQ(1u, UnsignedDiv32(j, j));
+ EXPECT_EQ(i / j, UnsignedDiv32(i, j));
+ }
+ }
+}
+
+
+TEST(Bits, UnsignedMod32) {
+ TRACED_FORRANGE(uint32_t, i, 0, 50) {
+ EXPECT_EQ(0u, UnsignedMod32(i, 0));
+ TRACED_FORRANGE(uint32_t, j, i + 1, 100) {
+ EXPECT_EQ(0u, UnsignedMod32(j, j));
+ EXPECT_EQ(i % j, UnsignedMod32(i, j));
+ }
+ }
+}
+
} // namespace bits
} // namespace base
} // namespace v8
diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 81ed616..1f0c8ad 100644
--- a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -633,6 +633,8 @@
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
@@ -687,6 +689,8 @@
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
ASSERT_LE(1U, s[0]->InputCount());
EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
diff --git a/test/unittests/compiler/js-operator-unittest.cc b/test/unittests/compiler/js-operator-unittest.cc
index 8e601ba..5de4235 100644
--- a/test/unittests/compiler/js-operator-unittest.cc
+++ b/test/unittests/compiler/js-operator-unittest.cc
@@ -66,12 +66,12 @@
SHARED(ToNumber, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
SHARED(ToString, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
SHARED(ToName, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(ToObject, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
SHARED(Create, Operator::kEliminatable, 0, 0, 1, 0, 1, 1),
- SHARED(HasProperty, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
+ SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0),
- SHARED(InstanceOf, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
+ SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
SHARED(Debugger, Operator::kNoProperties, 0, 0, 1, 1, 0, 1),
SHARED(CreateFunctionContext, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc
index a5b1cfe..461c2be 100644
--- a/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -731,19 +731,111 @@
}
+TEST_F(MachineOperatorReducerTest, Int32DivWithParameters) {
+ Node* const p0 = Parameter(0);
+ Reduction const r = Reduce(graph()->NewNode(machine()->Int32Div(), p0, p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord32Equal(IsWord32Equal(p0, IsInt32Constant(0)), IsInt32Constant(0)));
+}
+
+
+// -----------------------------------------------------------------------------
+// Uint32Div
+
+
+TEST_F(MachineOperatorReducerTest, Uint32DivWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Div(), Int32Constant(0), p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Div(), p0, Int32Constant(0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Div(), p0, Int32Constant(1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement(), p0);
+ }
+ TRACED_FOREACH(uint32_t, dividend, kUint32Values) {
+ TRACED_FOREACH(uint32_t, divisor, kUint32Values) {
+ Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Div(),
+ Uint32Constant(dividend),
+ Uint32Constant(divisor)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(bit_cast<int32_t>(
+ base::bits::UnsignedDiv32(dividend, divisor))));
+ }
+ }
+ TRACED_FORRANGE(uint32_t, shift, 1, 31) {
+ Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Div(), p0,
+ Uint32Constant(1u << shift)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord32Shr(p0, IsInt32Constant(bit_cast<int32_t>(shift))));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Uint32DivWithParameters) {
+ Node* const p0 = Parameter(0);
+ Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Div(), p0, p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsWord32Equal(IsWord32Equal(p0, IsInt32Constant(0)), IsInt32Constant(0)));
+}
+
+
// -----------------------------------------------------------------------------
// Int32Mod
TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
Node* const p0 = Parameter(0);
- static const int32_t kOnes[] = {-1, 1};
- TRACED_FOREACH(int32_t, one, kOnes) {
+ {
Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(one)));
+ Reduce(graph()->NewNode(machine()->Int32Mod(), Int32Constant(0), p0));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(-1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ TRACED_FOREACH(int32_t, dividend, kInt32Values) {
+ TRACED_FOREACH(int32_t, divisor, kInt32Values) {
+ Reduction const r = Reduce(graph()->NewNode(machine()->Int32Mod(),
+ Int32Constant(dividend),
+ Int32Constant(divisor)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(base::bits::SignedMod32(dividend, divisor)));
+ }
+ }
TRACED_FORRANGE(int32_t, shift, 1, 30) {
Reduction const r = Reduce(
graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(1 << shift)));
@@ -797,6 +889,68 @@
}
+TEST_F(MachineOperatorReducerTest, Int32ModWithParameters) {
+ Node* const p0 = Parameter(0);
+ Reduction const r = Reduce(graph()->NewNode(machine()->Int32Mod(), p0, p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// Uint32Mod
+
+
+TEST_F(MachineOperatorReducerTest, Uint32ModWithConstant) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Mod(), p0, Int32Constant(0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Mod(), Int32Constant(0), p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Mod(), p0, Int32Constant(1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ TRACED_FOREACH(uint32_t, dividend, kUint32Values) {
+ TRACED_FOREACH(uint32_t, divisor, kUint32Values) {
+ Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Mod(),
+ Uint32Constant(dividend),
+ Uint32Constant(divisor)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(bit_cast<int32_t>(
+ base::bits::UnsignedMod32(dividend, divisor))));
+ }
+ }
+ TRACED_FORRANGE(uint32_t, shift, 1, 31) {
+ Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Mod(), p0,
+ Uint32Constant(1u << shift)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord32And(p0, IsInt32Constant(
+ bit_cast<int32_t>((1u << shift) - 1u))));
+ }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Uint32ModWithParameters) {
+ Node* const p0 = Parameter(0);
+ Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Mod(), p0, p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+}
+
+
// -----------------------------------------------------------------------------
// Int32AddWithOverflow
diff --git a/test/unittests/compiler/machine-operator-unittest.cc b/test/unittests/compiler/machine-operator-unittest.cc
index 9d66f9e..41ac5c1 100644
--- a/test/unittests/compiler/machine-operator-unittest.cc
+++ b/test/unittests/compiler/machine-operator-unittest.cc
@@ -202,7 +202,9 @@
PURE(Float64Div, 2, 1), PURE(Float64Mod, 2, 1),
PURE(Float64Sqrt, 1, 1), PURE(Float64Equal, 2, 1),
PURE(Float64LessThan, 2, 1), PURE(Float64LessThanOrEqual, 2, 1),
- PURE(LoadStackPointer, 0, 1)
+ PURE(LoadStackPointer, 0, 1), PURE(Float64Floor, 1, 1),
+ PURE(Float64Ceil, 1, 1), PURE(Float64RoundTruncate, 1, 1),
+ PURE(Float64RoundTiesAway, 1, 1),
#undef PURE
};
diff --git a/tools/gdbinit b/tools/gdbinit
index 220959d..8d0345a 100644
--- a/tools/gdbinit
+++ b/tools/gdbinit
@@ -40,7 +40,7 @@
# Print JavaScript stack trace.
define jst
-print v8::internal::Isolate::Current()->PrintStack(stdout)
+print v8::internal::Isolate::Current()->PrintStack((FILE*) stdout)
end
document jst
Print the current JavaScript stack trace
diff --git a/tools/whitespace.txt b/tools/whitespace.txt
index 4430966..404751a 100644
--- a/tools/whitespace.txt
+++ b/tools/whitespace.txt
@@ -5,4 +5,4 @@
A Smi walks into a bar and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up..
+The Smi looked at them when a crazy v8-autoroll account showed up......