Merge v8 from https://chromium.googlesource.com/v8/v8.git at bfb312d4b40507780e15bfee2e0dc32f1391c4db
This commit was generated by merge_from_chromium.py.
Change-Id: Iddea45e6da7565e36b0259ff3f92ebdad96f6ac5
diff --git a/ChangeLog b/ChangeLog
index 6e9f6c7..97d2d18 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2014-10-28: Version 3.30.20
+
+ Performance and stability improvements on all platforms.
+
+
2014-10-27: Version 3.30.19
Check string literals with escapes in PreParserTraits::GetSymbol()
diff --git a/include/v8.h b/include/v8.h
index f8519db..f70f457 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1703,14 +1703,24 @@
*/
bool IsDataView() const;
- Local<Boolean> ToBoolean() const;
- Local<Number> ToNumber() const;
- Local<String> ToString() const;
- Local<String> ToDetailString() const;
- Local<Object> ToObject() const;
- Local<Integer> ToInteger() const;
- Local<Uint32> ToUint32() const;
- Local<Int32> ToInt32() const;
+ Local<Boolean> ToBoolean(Isolate* isolate) const;
+ Local<Number> ToNumber(Isolate* isolate) const;
+ Local<String> ToString(Isolate* isolate) const;
+ Local<String> ToDetailString(Isolate* isolate) const;
+ Local<Object> ToObject(Isolate* isolate) const;
+ Local<Integer> ToInteger(Isolate* isolate) const;
+ Local<Uint32> ToUint32(Isolate* isolate) const;
+ Local<Int32> ToInt32(Isolate* isolate) const;
+
+ // TODO(dcarney): deprecate all these.
+ inline Local<Boolean> ToBoolean() const;
+ inline Local<Number> ToNumber() const;
+ inline Local<String> ToString() const;
+ inline Local<String> ToDetailString() const;
+ inline Local<Object> ToObject() const;
+ inline Local<Integer> ToInteger() const;
+ inline Local<Uint32> ToUint32() const;
+ inline Local<Int32> ToInt32() const;
/**
* Attempts to convert a string to an array index.
@@ -6638,6 +6648,44 @@
}
+Local<Boolean> Value::ToBoolean() const {
+ return ToBoolean(Isolate::GetCurrent());
+}
+
+
+Local<Number> Value::ToNumber() const {
+ return ToNumber(Isolate::GetCurrent());
+}
+
+
+Local<String> Value::ToString() const {
+ return ToString(Isolate::GetCurrent());
+}
+
+
+Local<String> Value::ToDetailString() const {
+ return ToDetailString(Isolate::GetCurrent());
+}
+
+
+Local<Object> Value::ToObject() const {
+ return ToObject(Isolate::GetCurrent());
+}
+
+
+Local<Integer> Value::ToInteger() const {
+ return ToInteger(Isolate::GetCurrent());
+}
+
+
+Local<Uint32> Value::ToUint32() const {
+ return ToUint32(Isolate::GetCurrent());
+}
+
+
+Local<Int32> Value::ToInt32() const { return ToInt32(Isolate::GetCurrent()); }
+
+
Name* Name::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
diff --git a/src/api.cc b/src/api.cc
index 1698a3e..909335d 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2580,13 +2580,13 @@
}
-Local<String> Value::ToString() const {
+Local<String> Value::ToString(Isolate* v8_isolate) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> str;
if (obj->IsString()) {
str = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, "ToString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2598,13 +2598,13 @@
}
-Local<String> Value::ToDetailString() const {
+Local<String> Value::ToDetailString(Isolate* v8_isolate) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> str;
if (obj->IsString()) {
str = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, "ToDetailString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2616,13 +2616,13 @@
}
-Local<v8::Object> Value::ToObject() const {
+Local<v8::Object> Value::ToObject(Isolate* v8_isolate) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> val;
if (obj->IsJSObject()) {
val = obj;
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, "ToObject");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2634,12 +2634,12 @@
}
-Local<Boolean> Value::ToBoolean() const {
+Local<Boolean> Value::ToBoolean(Isolate* v8_isolate) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) {
return ToApiHandle<Boolean>(obj);
} else {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, "ToBoolean");
ENTER_V8(isolate);
i::Handle<i::Object> val =
@@ -2649,13 +2649,13 @@
}
-Local<Number> Value::ToNumber() const {
+Local<Number> Value::ToNumber(Isolate* v8_isolate) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsNumber()) {
num = obj;
} else {
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, "ToNumber");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2667,13 +2667,13 @@
}
-Local<Integer> Value::ToInteger() const {
+Local<Integer> Value::ToInteger(Isolate* v8_isolate) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
num = obj;
} else {
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, "ToInteger");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2935,13 +2935,13 @@
}
-Local<Int32> Value::ToInt32() const {
+Local<Int32> Value::ToInt32(Isolate* v8_isolate) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
num = obj;
} else {
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, "ToInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2952,13 +2952,13 @@
}
-Local<Uint32> Value::ToUint32() const {
+Local<Uint32> Value::ToUint32(Isolate* v8_isolate) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
num = obj;
} else {
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
LOG_API(isolate, "ToUInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -6916,7 +6916,7 @@
ENTER_V8(isolate);
i::HandleScope scope(isolate);
TryCatch try_catch;
- Handle<String> str = obj->ToString();
+ Handle<String> str = obj->ToString(reinterpret_cast<v8::Isolate*>(isolate));
if (str.IsEmpty()) return;
i::Handle<i::String> i_str = Utils::OpenHandle(*str);
length_ = v8::Utf8Length(*i_str, isolate);
@@ -6937,7 +6937,7 @@
ENTER_V8(isolate);
i::HandleScope scope(isolate);
TryCatch try_catch;
- Handle<String> str = obj->ToString();
+ Handle<String> str = obj->ToString(reinterpret_cast<v8::Isolate*>(isolate));
if (str.IsEmpty()) return;
length_ = str->Length();
str_ = i::NewArray<uint16_t>(length_ + 1);
diff --git a/src/array.js b/src/array.js
index 55dd797..29fa831 100644
--- a/src/array.js
+++ b/src/array.js
@@ -302,12 +302,8 @@
for (var i = len - del_count; i > start_i; i--) {
var from_index = i + del_count - 1;
var to_index = i + num_additional_args - 1;
- // The spec could also be interpreted such that
- // %HasOwnProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[from_index];
- if (!IS_UNDEFINED(current) || from_index in array) {
- array[to_index] = current;
+ if (from_index in array) {
+ array[to_index] = array[from_index];
} else {
delete array[to_index];
}
@@ -316,12 +312,8 @@
for (var i = start_i; i < len - del_count; i++) {
var from_index = i + del_count;
var to_index = i + num_additional_args;
- // The spec could also be interpreted such that
- // %HasOwnProperty would be the appropriate test. We follow
- // KJS in consulting the prototype.
- var current = array[from_index];
- if (!IS_UNDEFINED(current) || from_index in array) {
- array[to_index] = current;
+ if (from_index in array) {
+ array[to_index] = array[from_index];
} else {
delete array[to_index];
}
diff --git a/src/compilation-statistics.cc b/src/compilation-statistics.cc
index eb5b60c..2686ff7 100644
--- a/src/compilation-statistics.cc
+++ b/src/compilation-statistics.cc
@@ -47,7 +47,8 @@
void CompilationStatistics::BasicStats::Accumulate(const BasicStats& stats) {
delta_ += stats.delta_;
total_allocated_bytes_ += stats.total_allocated_bytes_;
- if (stats.max_allocated_bytes_ > max_allocated_bytes_) {
+ if (stats.absolute_max_allocated_bytes_ > absolute_max_allocated_bytes_) {
+ absolute_max_allocated_bytes_ = stats.absolute_max_allocated_bytes_;
max_allocated_bytes_ = stats.max_allocated_bytes_;
function_name_ = stats.function_name_;
}
@@ -66,9 +67,12 @@
static_cast<double>(stats.total_allocated_bytes_ * 100) /
static_cast<double>(total_stats.total_allocated_bytes_);
base::OS::SNPrintF(buffer, kBufferSize,
- "%28s %10.3f ms / %5.1f %% %10u total / %5.1f %% %10u max",
+ "%28s %10.3f ms / %5.1f %% "
+ "%10u total / %5.1f %% "
+ "%10u max %10u abs_max",
name, ms, percent, stats.total_allocated_bytes_,
- size_percent, stats.max_allocated_bytes_);
+ size_percent, stats.max_allocated_bytes_,
+ stats.absolute_max_allocated_bytes_);
os << buffer;
if (stats.function_name_.size() > 0) {
@@ -79,8 +83,8 @@
static void WriteFullLine(std::ostream& os) {
- os << "-----------------------------------------------"
- "-----------------------------------------------\n";
+ os << "--------------------------------------------------------"
+ "--------------------------------------------------------\n";
}
@@ -92,8 +96,8 @@
static void WritePhaseKindBreak(std::ostream& os) {
- os << " ------------------"
- "-----------------------------------------------\n";
+ os << " ---------------------------"
+ "--------------------------------------------------------\n";
}
diff --git a/src/compilation-statistics.h b/src/compilation-statistics.h
index 62fefe3..45ffb9b 100644
--- a/src/compilation-statistics.h
+++ b/src/compilation-statistics.h
@@ -22,13 +22,17 @@
class BasicStats {
public:
- BasicStats() : total_allocated_bytes_(0), max_allocated_bytes_(0) {}
+ BasicStats()
+ : total_allocated_bytes_(0),
+ max_allocated_bytes_(0),
+ absolute_max_allocated_bytes_(0) {}
void Accumulate(const BasicStats& stats);
base::TimeDelta delta_;
size_t total_allocated_bytes_;
size_t max_allocated_bytes_;
+ size_t absolute_max_allocated_bytes_;
std::string function_name_;
};
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index c218587..c0e5004 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -682,7 +682,7 @@
__ stm(db_w, sp, saves);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -914,7 +914,7 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
- if (!linkage()->info()->IsStub()) {
+ if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 04acfb8..c3a4f40 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -752,7 +752,7 @@
__ PushCalleeSavedRegisters();
frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
__ SetStackPointer(jssp);
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
@@ -967,7 +967,7 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
- if (!linkage()->info()->IsStub()) {
+ if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
intptr_t current_pc = masm()->pc_offset();
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index b972601..b230090 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -22,10 +22,10 @@
: StructuredGraphBuilder(local_zone, jsgraph->graph(), jsgraph->common()),
info_(info),
jsgraph_(jsgraph),
- globals_(0, info->zone()),
+ globals_(0, local_zone),
breakable_(NULL),
execution_context_(NULL) {
- InitializeAstVisitor(info->zone());
+ InitializeAstVisitor(local_zone);
}
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 4f278fd..7ca27ce 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -13,10 +13,11 @@
namespace compiler {
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code)
+ InstructionSequence* code, CompilationInfo* info)
: frame_(frame),
linkage_(linkage),
code_(code),
+ info_(info),
current_block_(BasicBlock::RpoNumber::Invalid()),
current_source_position_(SourcePosition::Invalid()),
masm_(code->zone()->isolate(), NULL, 0),
@@ -29,7 +30,7 @@
Handle<Code> CodeGenerator::GenerateCode() {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
// Emit a code line info recording start event.
PositionsRecorder* recorder = masm()->positions_recorder();
@@ -166,7 +167,7 @@
masm()->positions_recorder()->WriteRecordedPositions();
if (FLAG_code_comments) {
Vector<char> buffer = Vector<char>::New(256);
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
int ln = Script::GetLineNumber(info->script(), code_pos);
int cn = Script::GetColumnNumber(info->script(), code_pos);
if (info->script()->name()->IsString()) {
@@ -196,7 +197,7 @@
void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
int deopt_count = static_cast<int>(deoptimization_states_.size());
if (deopt_count == 0) return;
Handle<DeoptimizationInputData> data =
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index d7ce6d5..494f91e 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -21,7 +21,7 @@
class CodeGenerator FINAL : public GapResolver::Assembler {
public:
explicit CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code);
+ InstructionSequence* code, CompilationInfo* info);
// Generate native code.
Handle<Code> GenerateCode();
@@ -36,6 +36,7 @@
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }
Zone* zone() const { return code()->zone(); }
+ CompilationInfo* info() const { return info_; }
// Checks if {block} will appear directly after {current_block_} when
// assembling code, in which case, a fall-through can be used.
@@ -118,6 +119,7 @@
Frame* const frame_;
Linkage* const linkage_;
InstructionSequence* const code_;
+ CompilationInfo* const info_;
BasicBlock::RpoNumber current_block_;
SourcePosition current_source_position_;
MacroAssembler masm_;
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 9b077c3..31c3d12 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -131,7 +131,7 @@
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
-class CommonOperatorBuilder FINAL {
+class CommonOperatorBuilder FINAL : public ZoneObject {
public:
explicit CommonOperatorBuilder(Zone* zone);
@@ -174,6 +174,8 @@
const CommonOperatorBuilderImpl& impl_;
Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(CommonOperatorBuilder);
};
} // namespace compiler
diff --git a/src/compiler/control-reducer.cc b/src/compiler/control-reducer.cc
index 03d0583..e1bd0c9 100644
--- a/src/compiler/control-reducer.cc
+++ b/src/compiler/control-reducer.cc
@@ -14,7 +14,8 @@
namespace internal {
namespace compiler {
-enum VisitState { kUnvisited, kOnStack, kRevisit, kVisited };
+enum VisitState { kUnvisited = 0, kOnStack = 1, kRevisit = 2, kVisited = 3 };
+enum Reachability { kFromStart = 8 };
#define TRACE(x) \
if (FLAG_trace_turbo) PrintF x
@@ -39,23 +40,169 @@
ZoneDeque<Node*> revisit_;
Node* dead_;
- void Trim() {
- // Mark all nodes reachable from end.
- NodeVector nodes(zone_);
- state_.assign(jsgraph_->graph()->NodeCount(), kUnvisited);
- Push(jsgraph_->graph()->end());
- while (!stack_.empty()) {
- Node* node = stack_[stack_.size() - 1];
- stack_.pop_back();
- state_[node->id()] = kVisited;
- nodes.push_back(node);
- for (InputIter i = node->inputs().begin(); i != node->inputs().end();
- ++i) {
- Recurse(*i); // pushes node onto the stack if necessary.
+ void Reduce() {
+ Push(graph()->end());
+ do {
+ // Process the node on the top of the stack, potentially pushing more
+ // or popping the node off the stack.
+ ReduceTop();
+ // If the stack becomes empty, revisit any nodes in the revisit queue.
+ // If no nodes in the revisit queue, try removing dead loops.
+ // If no dead loops, then finish.
+ } while (!stack_.empty() || TryRevisit() || RepairAndRemoveLoops());
+ }
+
+ bool TryRevisit() {
+ while (!revisit_.empty()) {
+ Node* n = revisit_.back();
+ revisit_.pop_back();
+ if (state_[n->id()] == kRevisit) { // state can change while in queue.
+ Push(n);
+ return true;
}
}
+ return false;
+ }
+
+ // Repair the graph after the possible creation of non-terminating or dead
+ // loops. Removing dead loops can produce more opportunities for reduction.
+ bool RepairAndRemoveLoops() {
+ // TODO(turbofan): we can skip this if the graph has no loops, but
+ // we have to be careful about proper loop detection during reduction.
+
+ // Gather all nodes backwards-reachable from end (through inputs).
+ state_.assign(graph()->NodeCount(), kUnvisited);
+ NodeVector nodes(zone_);
+ AddNodesReachableFromEnd(nodes);
+
+ // Walk forward through control nodes, looking for back edges to nodes
+ // that are not connected to end. Those are non-terminating loops (NTLs).
+ Node* start = graph()->start();
+ ZoneVector<byte> fw_reachability(graph()->NodeCount(), 0, zone_);
+ fw_reachability[start->id()] = kFromStart | kOnStack;
+ stack_.push_back(start);
+
+ while (!stack_.empty()) {
+ Node* node = stack_.back();
+ TRACE(("ControlFw: #%d:%s\n", node->id(), node->op()->mnemonic()));
+ bool pop = true;
+ for (Node* const succ : node->uses()) {
+ byte reach = fw_reachability[succ->id()];
+ if ((reach & kOnStack) != 0 && state_[succ->id()] != kVisited) {
+ // {succ} is on stack and not reachable from end.
+ ConnectNTL(nodes, succ);
+ fw_reachability.resize(graph()->NodeCount(), 0);
+ pop = false; // continue traversing inputs to this node.
+ break;
+ }
+ if ((reach & kFromStart) == 0 &&
+ IrOpcode::IsControlOpcode(succ->opcode())) {
+ // {succ} is a control node and not yet reached from start.
+ fw_reachability[succ->id()] |= kFromStart | kOnStack;
+ stack_.push_back(succ);
+ pop = false; // "recurse" into successor control node.
+ break;
+ }
+ }
+ if (pop) {
+ fw_reachability[node->id()] &= ~kOnStack;
+ stack_.pop_back();
+ }
+ }
+
+ // Trim references from dead nodes to live nodes first.
+ jsgraph_->GetCachedNodes(&nodes);
+ TrimNodes(nodes);
+
+ // Any control nodes not reachable from start are dead, even loops.
+ for (size_t i = 0; i < nodes.size(); i++) {
+ Node* node = nodes[i];
+ byte reach = fw_reachability[node->id()];
+ if ((reach & kFromStart) == 0 &&
+ IrOpcode::IsControlOpcode(node->opcode())) {
+ ReplaceNode(node, dead()); // uses will be added to revisit queue.
+ }
+ }
+ return TryRevisit(); // try to push a node onto the stack.
+ }
+
+ // Connect {loop}, the header of a non-terminating loop, to the end node.
+ void ConnectNTL(NodeVector& nodes, Node* loop) {
+ TRACE(("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic()));
+
+ if (loop->opcode() != IrOpcode::kTerminate) {
+ // Insert a {Terminate} node if the loop has effects.
+ ZoneDeque<Node*> effects(zone_);
+ for (Node* const use : loop->uses()) {
+ if (use->opcode() == IrOpcode::kEffectPhi) effects.push_back(use);
+ }
+ int count = static_cast<int>(effects.size());
+ if (count > 0) {
+ Node** inputs = zone_->NewArray<Node*>(1 + count);
+ for (int i = 0; i < count; i++) inputs[i] = effects[i];
+ inputs[count] = loop;
+ loop = graph()->NewNode(common_->Terminate(count), 1 + count, inputs);
+ TRACE(("AddTerminate: #%d:%s[%d]\n", loop->id(), loop->op()->mnemonic(),
+ count));
+ }
+ }
+
+ Node* to_add = loop;
+ Node* end = graph()->end();
+ CHECK_EQ(IrOpcode::kEnd, end->opcode());
+ Node* merge = end->InputAt(0);
+ if (merge == NULL || merge->opcode() == IrOpcode::kDead) {
+ // The end node died; just connect end to {loop}.
+ end->ReplaceInput(0, loop);
+ } else if (merge->opcode() != IrOpcode::kMerge) {
+ // Introduce a final merge node for {end->InputAt(0)} and {loop}.
+ merge = graph()->NewNode(common_->Merge(2), merge, loop);
+ end->ReplaceInput(0, merge);
+ to_add = merge;
+ } else {
+ // Append a new input to the final merge at the end.
+ merge->AppendInput(graph()->zone(), loop);
+ merge->set_op(common_->Merge(merge->InputCount()));
+ }
+ nodes.push_back(to_add);
+ state_.resize(graph()->NodeCount(), kUnvisited);
+ state_[to_add->id()] = kVisited;
+ AddBackwardsReachableNodes(nodes, nodes.size() - 1);
+ }
+
+ void AddNodesReachableFromEnd(NodeVector& nodes) {
+ Node* end = graph()->end();
+ state_[end->id()] = kVisited;
+ if (!end->IsDead()) {
+ nodes.push_back(end);
+ AddBackwardsReachableNodes(nodes, nodes.size() - 1);
+ }
+ }
+
+ void AddBackwardsReachableNodes(NodeVector& nodes, size_t cursor) {
+ while (cursor < nodes.size()) {
+ Node* node = nodes[cursor++];
+ for (Node* const input : node->inputs()) {
+ if (state_[input->id()] != kVisited) {
+ state_[input->id()] = kVisited;
+ nodes.push_back(input);
+ }
+ }
+ }
+ }
+
+ void Trim() {
+ // Gather all nodes backwards-reachable from end through inputs.
+ state_.assign(graph()->NodeCount(), kUnvisited);
+ NodeVector nodes(zone_);
+ AddNodesReachableFromEnd(nodes);
+
// Process cached nodes in the JSGraph too.
jsgraph_->GetCachedNodes(&nodes);
+ TrimNodes(nodes);
+ }
+
+ void TrimNodes(NodeVector& nodes) {
// Remove dead->live edges.
for (size_t j = 0; j < nodes.size(); j++) {
Node* node = nodes[j];
@@ -75,18 +222,46 @@
// Verify that no inputs to live nodes are NULL.
for (size_t j = 0; j < nodes.size(); j++) {
Node* node = nodes[j];
- for (InputIter i = node->inputs().begin(); i != node->inputs().end();
- ++i) {
- CHECK_NE(NULL, *i);
+ for (Node* const input : node->inputs()) {
+ CHECK_NE(NULL, input);
}
- for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
- size_t id = static_cast<size_t>((*i)->id());
+ for (Node* const use : node->uses()) {
+ size_t id = static_cast<size_t>(use->id());
CHECK_EQ(kVisited, state_[id]);
}
}
#endif
}
+ // Reduce the node on the top of the stack.
+ // If an input {i} is not yet visited or needs to be revisited, push {i} onto
+ // the stack and return. Otherwise, all inputs are visited, so apply
+ // reductions for {node} and pop it off the stack.
+ void ReduceTop() {
+ size_t height = stack_.size();
+ Node* node = stack_.back();
+
+ if (node->IsDead()) return Pop(); // Node was killed while on stack.
+
+ TRACE(("ControlReduce: #%d:%s\n", node->id(), node->op()->mnemonic()));
+
+ // Recurse on an input if necessary.
+ for (Node* const input : node->inputs()) {
+ if (Recurse(input)) return;
+ }
+
+ // All inputs should be visited or on stack. Apply reductions to node.
+ Node* replacement = ReduceNode(node);
+ if (replacement != node) ReplaceNode(node, replacement);
+
+ // After reducing the node, pop it off the stack.
+ CHECK_EQ(static_cast<int>(height), static_cast<int>(stack_.size()));
+ Pop();
+
+ // If there was a replacement, reduce it after popping {node}.
+ if (replacement != node) Recurse(replacement);
+ }
+
// Push a node onto the stack if its state is {kUnvisited} or {kRevisit}.
bool Recurse(Node* node) {
size_t id = static_cast<size_t>(node->id());
@@ -103,13 +278,223 @@
state_[node->id()] = kOnStack;
stack_.push_back(node);
}
+
+ void Pop() {
+ int pos = static_cast<int>(stack_.size()) - 1;
+ DCHECK_GE(pos, 0);
+ DCHECK_EQ(kOnStack, state_[stack_[pos]->id()]);
+ state_[stack_[pos]->id()] = kVisited;
+ stack_.pop_back();
+ }
+
+ // Queue a node to be revisited if it has been visited once already.
+ void Revisit(Node* node) {
+ size_t id = static_cast<size_t>(node->id());
+ if (id < state_.size() && state_[id] == kVisited) {
+ TRACE((" Revisit #%d:%s\n", node->id(), node->op()->mnemonic()));
+ state_[id] = kRevisit;
+ revisit_.push_back(node);
+ }
+ }
+
+ Node* dead() {
+ if (dead_ == NULL) dead_ = graph()->NewNode(common_->Dead());
+ return dead_;
+ }
+
+ //===========================================================================
+ // Reducer implementation: perform reductions on a node.
+ //===========================================================================
+ Node* ReduceNode(Node* node) {
+ if (OperatorProperties::GetControlInputCount(node->op()) == 1) {
+ // If a node has only one control input and it is dead, replace with dead.
+ Node* control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kDead) {
+ TRACE(("ControlDead: #%d:%s\n", node->id(), node->op()->mnemonic()));
+ return control;
+ }
+ }
+
+ // Reduce branches, phis, and merges.
+ switch (node->opcode()) {
+ case IrOpcode::kBranch:
+ return ReduceBranch(node);
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge:
+ return ReduceMerge(node);
+ case IrOpcode::kPhi:
+ case IrOpcode::kEffectPhi:
+ return ReducePhi(node);
+ default:
+ return node;
+ }
+ }
+
+ // Reduce redundant phis.
+ Node* ReducePhi(Node* node) {
+ int n = node->InputCount();
+ if (n <= 1) return dead(); // No non-control inputs.
+ if (n == 2) return node->InputAt(0); // Only one non-control input.
+
+ Node* replacement = NULL;
+ Node::Inputs inputs = node->inputs();
+ for (InputIter it = inputs.begin(); n > 1; --n, ++it) {
+ Node* input = *it;
+ if (input->opcode() == IrOpcode::kDead) continue; // ignore dead inputs.
+ if (input != node && input != replacement) { // non-redundant input.
+ if (replacement != NULL) return node;
+ replacement = input;
+ }
+ }
+ return replacement == NULL ? dead() : replacement;
+ }
+
+ // Reduce merges by trimming away dead inputs from the merge and phis.
+ Node* ReduceMerge(Node* node) {
+ // Count the number of live inputs.
+ int live = 0;
+ int index = 0;
+ int live_index = 0;
+ for (Node* const input : node->inputs()) {
+ if (input->opcode() != IrOpcode::kDead) {
+ live++;
+ live_index = index;
+ }
+ index++;
+ }
+
+ if (live > 1 && live == node->InputCount()) return node; // nothing to do.
+
+ TRACE(("ReduceMerge: #%d:%s (%d live)\n", node->id(),
+ node->op()->mnemonic(), live));
+
+ if (live == 0) return dead(); // no remaining inputs.
+
+ // Gather phis and effect phis to be edited.
+ ZoneVector<Node*> phis(zone_);
+ for (Node* const use : node->uses()) {
+ if (use->opcode() == IrOpcode::kPhi ||
+ use->opcode() == IrOpcode::kEffectPhi) {
+ phis.push_back(use);
+ }
+ }
+
+ if (live == 1) {
+ // All phis are redundant. Replace them with their live input.
+ for (Node* const phi : phis) ReplaceNode(phi, phi->InputAt(live_index));
+ // The merge itself is redundant.
+ return node->InputAt(live_index);
+ }
+
+ // Edit phis in place, removing dead inputs and revisiting them.
+ for (Node* const phi : phis) {
+ TRACE((" PhiInMerge: #%d:%s (%d live)\n", phi->id(),
+ phi->op()->mnemonic(), live));
+ RemoveDeadInputs(node, phi);
+ Revisit(phi);
+ }
+ // Edit the merge in place, removing dead inputs.
+ RemoveDeadInputs(node, node);
+ return node;
+ }
+
+ // Reduce branches if they have constant inputs.
+ Node* ReduceBranch(Node* node) {
+ Node* cond = node->InputAt(0);
+ bool is_true;
+ switch (cond->opcode()) {
+ case IrOpcode::kInt32Constant:
+ is_true = !Int32Matcher(cond).Is(0);
+ break;
+ case IrOpcode::kNumberConstant:
+ is_true = !NumberMatcher(cond).Is(0);
+ break;
+ case IrOpcode::kHeapConstant: {
+ Handle<Object> object =
+ HeapObjectMatcher<Object>(cond).Value().handle();
+ if (object->IsTrue())
+ is_true = true;
+ else if (object->IsFalse())
+ is_true = false;
+ else
+ return node; // TODO(turbofan): fold branches on strings, objects.
+ break;
+ }
+ default:
+ return node;
+ }
+
+ TRACE(("BranchReduce: #%d:%s = %s\n", node->id(), node->op()->mnemonic(),
+ is_true ? "true" : "false"));
+
+ // Replace IfTrue and IfFalse projections from this branch.
+ Node* control = NodeProperties::GetControlInput(node);
+ for (UseIter i = node->uses().begin(); i != node->uses().end();) {
+ Node* to = *i;
+ if (to->opcode() == IrOpcode::kIfTrue) {
+ TRACE((" IfTrue: #%d:%s\n", to->id(), to->op()->mnemonic()));
+ i.UpdateToAndIncrement(NULL);
+ ReplaceNode(to, is_true ? control : dead());
+ } else if (to->opcode() == IrOpcode::kIfFalse) {
+ TRACE((" IfFalse: #%d:%s\n", to->id(), to->op()->mnemonic()));
+ i.UpdateToAndIncrement(NULL);
+ ReplaceNode(to, is_true ? dead() : control);
+ } else {
+ ++i;
+ }
+ }
+ return control;
+ }
+
+ // Remove inputs to {node} corresponding to the dead inputs to {merge}
+ // and compact the remaining inputs, updating the operator.
+ void RemoveDeadInputs(Node* merge, Node* node) {
+ int pos = 0;
+ for (int i = 0; i < node->InputCount(); i++) {
+ // skip dead inputs.
+ if (i < merge->InputCount() &&
+ merge->InputAt(i)->opcode() == IrOpcode::kDead)
+ continue;
+ // compact live inputs.
+ if (pos != i) node->ReplaceInput(pos, node->InputAt(i));
+ pos++;
+ }
+ node->TrimInputCount(pos);
+ if (node->opcode() == IrOpcode::kPhi) {
+ node->set_op(common_->Phi(OpParameter<MachineType>(node->op()), pos - 1));
+ } else if (node->opcode() == IrOpcode::kEffectPhi) {
+ node->set_op(common_->EffectPhi(pos - 1));
+ } else if (node->opcode() == IrOpcode::kMerge) {
+ node->set_op(common_->Merge(pos));
+ } else if (node->opcode() == IrOpcode::kLoop) {
+ node->set_op(common_->Loop(pos));
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ // Replace uses of {node} with {replacement} and revisit the uses.
+ void ReplaceNode(Node* node, Node* replacement) {
+ if (node == replacement) return;
+ TRACE((" Replace: #%d:%s with #%d:%s\n", node->id(),
+ node->op()->mnemonic(), replacement->id(),
+ replacement->op()->mnemonic()));
+ for (Node* const use : node->uses()) {
+ // Don't revisit this node if it refers to itself.
+ if (use != node) Revisit(use);
+ }
+ node->ReplaceUses(replacement);
+ node->Kill();
+ }
+
+ Graph* graph() { return jsgraph_->graph(); }
};
+
void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph,
CommonOperatorBuilder* common) {
- ControlReducerImpl impl(zone, jsgraph, NULL);
- // Only trim the graph for now. Control reduction can reduce non-terminating
- // loops to graphs that are unschedulable at the moment.
+ ControlReducerImpl impl(zone, jsgraph, common);
+ impl.Reduce();
impl.Trim();
}
@@ -118,6 +503,33 @@
ControlReducerImpl impl(zone, jsgraph, NULL);
impl.Trim();
}
+
+
+Node* ControlReducer::ReducePhiForTesting(JSGraph* jsgraph,
+ CommonOperatorBuilder* common,
+ Node* node) {
+ Zone zone(jsgraph->graph()->zone()->isolate());
+ ControlReducerImpl impl(&zone, jsgraph, common);
+ return impl.ReducePhi(node);
+}
+
+
+Node* ControlReducer::ReduceMergeForTesting(JSGraph* jsgraph,
+ CommonOperatorBuilder* common,
+ Node* node) {
+ Zone zone(jsgraph->graph()->zone()->isolate());
+ ControlReducerImpl impl(&zone, jsgraph, common);
+ return impl.ReduceMerge(node);
+}
+
+
+Node* ControlReducer::ReduceBranchForTesting(JSGraph* jsgraph,
+ CommonOperatorBuilder* common,
+ Node* node) {
+ Zone zone(jsgraph->graph()->zone()->isolate());
+ ControlReducerImpl impl(&zone, jsgraph, common);
+ return impl.ReduceBranch(node);
+}
}
}
} // namespace v8::internal::compiler
diff --git a/src/compiler/control-reducer.h b/src/compiler/control-reducer.h
index e9ae9bc..e25bb88 100644
--- a/src/compiler/control-reducer.h
+++ b/src/compiler/control-reducer.h
@@ -11,6 +11,7 @@
class JSGraph;
class CommonOperatorBuilder;
+class Node;
class ControlReducer {
public:
@@ -20,6 +21,16 @@
// Trim nodes in the graph that are not reachable from end.
static void TrimGraph(Zone* zone, JSGraph* graph);
+
+ // Testing interface.
+ static Node* ReducePhiForTesting(JSGraph* graph,
+ CommonOperatorBuilder* builder, Node* node);
+ static Node* ReduceBranchForTesting(JSGraph* graph,
+ CommonOperatorBuilder* builder,
+ Node* node);
+ static Node* ReduceMergeForTesting(JSGraph* graph,
+ CommonOperatorBuilder* builder,
+ Node* node);
};
}
}
diff --git a/src/compiler/generic-node-inl.h b/src/compiler/generic-node-inl.h
index 76313af..afbd1f0 100644
--- a/src/compiler/generic-node-inl.h
+++ b/src/compiler/generic-node-inl.h
@@ -16,13 +16,16 @@
namespace compiler {
template <class B, class S>
-GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count)
+GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count,
+ int reserve_input_count)
: BaseClass(graph->zone()),
input_count_(input_count),
+ reserve_input_count_(reserve_input_count),
has_appendable_inputs_(false),
use_count_(0),
first_use_(NULL),
last_use_(NULL) {
+ DCHECK(reserve_input_count <= kMaxReservedInputs);
inputs_.static_ = reinterpret_cast<Input*>(this + 1);
AssignUniqueID(graph);
}
@@ -154,12 +157,18 @@
template <class B, class S>
void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
- EnsureAppendableInputs(zone);
Use* new_use = new (zone) Use;
Input new_input;
new_input.to = to_append;
new_input.use = new_use;
- inputs_.appendable_->push_back(new_input);
+ if (reserve_input_count_ > 0) {
+ DCHECK(!has_appendable_inputs_);
+ reserve_input_count_--;
+ inputs_.static_[input_count_] = new_input;
+ } else {
+ EnsureAppendableInputs(zone);
+ inputs_.appendable_->push_back(new_input);
+ }
new_use->input_index = input_count_;
new_use->from = this;
to_append->AppendUse(new_use);
@@ -224,15 +233,16 @@
}
template <class B, class S>
-S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
- S** inputs) {
+S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count, S** inputs,
+ bool has_extensible_inputs) {
size_t node_size = sizeof(GenericNode);
- size_t inputs_size = input_count * sizeof(Input);
+ int reserve_input_count = has_extensible_inputs ? kDefaultReservedInputs : 0;
+ size_t inputs_size = (input_count + reserve_input_count) * sizeof(Input);
size_t uses_size = input_count * sizeof(Use);
int size = static_cast<int>(node_size + inputs_size + uses_size);
Zone* zone = graph->zone();
void* buffer = zone->New(size);
- S* result = new (buffer) S(graph, input_count);
+ S* result = new (buffer) S(graph, input_count, reserve_input_count);
Input* input =
reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
Use* use =
diff --git a/src/compiler/generic-node.h b/src/compiler/generic-node.h
index 3dc324d..506a34f 100644
--- a/src/compiler/generic-node.h
+++ b/src/compiler/generic-node.h
@@ -92,7 +92,8 @@
bool OwnedBy(GenericNode* owner) const;
- static S* New(GenericGraphBase* graph, int input_count, S** inputs);
+ static S* New(GenericGraphBase* graph, int input_count, S** inputs,
+ bool has_extensible_inputs);
protected:
friend class GenericGraphBase;
@@ -128,15 +129,21 @@
void* operator new(size_t, void* location) { return location; }
- GenericNode(GenericGraphBase* graph, int input_count);
+ GenericNode(GenericGraphBase* graph, int input_count,
+ int reserved_input_count);
private:
void AssignUniqueID(GenericGraphBase* graph);
typedef ZoneDeque<Input> InputDeque;
+ static const int kReservedInputCountBits = 2;
+ static const int kMaxReservedInputs = (1 << kReservedInputCountBits) - 1;
+ static const int kDefaultReservedInputs = kMaxReservedInputs;
+
NodeId id_;
- int input_count_ : 31;
+ int input_count_ : 29;
+ unsigned int reserve_input_count_ : kReservedInputCountBits;
bool has_appendable_inputs_ : 1;
union {
// When a node is initially allocated, it uses a static buffer to hold its
diff --git a/src/compiler/graph-builder.cc b/src/compiler/graph-builder.cc
index 2d420e4..ae55b95 100644
--- a/src/compiler/graph-builder.cc
+++ b/src/compiler/graph-builder.cc
@@ -25,13 +25,26 @@
common_(common),
environment_(NULL),
local_zone_(local_zone),
+ input_buffer_size_(0),
+ input_buffer_(NULL),
current_context_(NULL),
- exit_control_(NULL) {}
+ exit_control_(NULL) {
+ EnsureInputBufferSize(kInputBufferSizeIncrement);
+}
+
+
+Node** StructuredGraphBuilder::EnsureInputBufferSize(int size) {
+ if (size > input_buffer_size_) {
+ size += kInputBufferSizeIncrement;
+ input_buffer_ = local_zone()->NewArray<Node*>(size);
+ }
+ return input_buffer_;
+}
Node* StructuredGraphBuilder::MakeNode(const Operator* op,
int value_input_count,
- Node** value_inputs) {
+ Node** value_inputs, bool incomplete) {
DCHECK(op->InputCount() == value_input_count);
bool has_context = OperatorProperties::HasContextInput(op);
@@ -44,14 +57,14 @@
Node* result = NULL;
if (!has_context && !has_framestate && !has_control && !has_effect) {
- result = graph()->NewNode(op, value_input_count, value_inputs);
+ result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
if (has_framestate) ++input_count_with_deps;
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
- Node** buffer = local_zone()->NewArray<Node*>(input_count_with_deps);
+ Node** buffer = EnsureInputBufferSize(input_count_with_deps);
memcpy(buffer, value_inputs, kPointerSize * value_input_count);
Node** current_input = buffer + value_input_count;
if (has_context) {
@@ -69,7 +82,7 @@
if (has_control) {
*current_input++ = environment_->GetControlDependency();
}
- result = graph()->NewNode(op, input_count_with_deps, buffer);
+ result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
if (has_effect) {
environment_->UpdateEffectDependency(result);
}
@@ -125,7 +138,9 @@
// placing a singleton merge as the new control dependency.
if (this->IsMarkedAsUnreachable()) {
Node* other_control = other->control_dependency_;
- control_dependency_ = graph()->NewNode(common()->Merge(1), other_control);
+ Node* inputs[] = {other_control};
+ control_dependency_ =
+ graph()->NewNode(common()->Merge(1), arraysize(inputs), inputs, true);
effect_dependency_ = other->effect_dependency_;
values_ = other->values_;
return;
@@ -164,7 +179,7 @@
Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
- Node** buffer = local_zone()->NewArray<Node*>(count + 1);
+ Node** buffer = EnsureInputBufferSize(count + 1);
MemsetPointer(buffer, input, count);
buffer[count] = control;
return graph()->NewNode(phi_op, count + 1, buffer, true);
@@ -175,7 +190,7 @@
Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
Node* control) {
const Operator* phi_op = common()->EffectPhi(count);
- Node** buffer = local_zone()->NewArray<Node*>(count + 1);
+ Node** buffer = EnsureInputBufferSize(count + 1);
MemsetPointer(buffer, input, count);
buffer[count] = control;
return graph()->NewNode(phi_op, count + 1, buffer, true);
@@ -197,7 +212,8 @@
} else {
// Control node is a singleton, introduce a merge.
const Operator* op = common()->Merge(inputs);
- control = graph()->NewNode(op, control, other);
+ Node* inputs[] = {control, other};
+ control = graph()->NewNode(op, arraysize(inputs), inputs, true);
}
return control;
}
diff --git a/src/compiler/graph-builder.h b/src/compiler/graph-builder.h
index a8ef502..90df6ca 100644
--- a/src/compiler/graph-builder.h
+++ b/src/compiler/graph-builder.h
@@ -24,42 +24,44 @@
explicit GraphBuilder(Graph* graph) : graph_(graph) {}
virtual ~GraphBuilder() {}
- Node* NewNode(const Operator* op) {
- return MakeNode(op, 0, static_cast<Node**>(NULL));
+ Node* NewNode(const Operator* op, bool incomplete = false) {
+ return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
}
- Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+ Node* NewNode(const Operator* op, Node* n1) {
+ return MakeNode(op, 1, &n1, false);
+ }
Node* NewNode(const Operator* op, Node* n1, Node* n2) {
Node* buffer[] = {n1, n2};
- return MakeNode(op, arraysize(buffer), buffer);
+ return MakeNode(op, arraysize(buffer), buffer, false);
}
Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
Node* buffer[] = {n1, n2, n3};
- return MakeNode(op, arraysize(buffer), buffer);
+ return MakeNode(op, arraysize(buffer), buffer, false);
}
Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
Node* buffer[] = {n1, n2, n3, n4};
- return MakeNode(op, arraysize(buffer), buffer);
+ return MakeNode(op, arraysize(buffer), buffer, false);
}
Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
Node* n5) {
Node* buffer[] = {n1, n2, n3, n4, n5};
- return MakeNode(op, arraysize(buffer), buffer);
+ return MakeNode(op, arraysize(buffer), buffer, false);
}
Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
Node* n5, Node* n6) {
Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return MakeNode(op, arraysize(nodes), nodes);
+ return MakeNode(op, arraysize(nodes), nodes, false);
}
- Node* NewNode(const Operator* op, int value_input_count,
- Node** value_inputs) {
- return MakeNode(op, value_input_count, value_inputs);
+ Node* NewNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete = false) {
+ return MakeNode(op, value_input_count, value_inputs, incomplete);
}
Graph* graph() const { return graph_; }
@@ -67,7 +69,7 @@
protected:
// Base implementation used by all factory methods.
virtual Node* MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs) = 0;
+ Node** value_inputs, bool incomplete) = 0;
private:
Graph* graph_;
@@ -95,8 +97,8 @@
// Helpers to create new control nodes.
Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
- Node* NewMerge() { return NewNode(common()->Merge(1)); }
- Node* NewLoop() { return NewNode(common()->Loop(1)); }
+ Node* NewMerge() { return NewNode(common()->Merge(1), true); }
+ Node* NewLoop() { return NewNode(common()->Loop(1), true); }
Node* NewBranch(Node* condition) {
return NewNode(common()->Branch(), condition);
}
@@ -110,7 +112,7 @@
// ensures effect and control dependencies are wired up. The dependencies
// tracked by the environment might be mutated.
virtual Node* MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs) FINAL;
+ Node** value_inputs, bool incomplete) FINAL;
Environment* environment() const { return environment_; }
void set_environment(Environment* env) { environment_ = env; }
@@ -148,6 +150,10 @@
// Zone local to the builder for data not leaking into the graph.
Zone* local_zone_;
+ // Temporary storage for building node input lists.
+ int input_buffer_size_;
+ Node** input_buffer_;
+
// Node representing the control dependency for dead code.
SetOncePointer<Node> dead_control_;
@@ -157,6 +163,12 @@
// Merge of all control nodes that exit the function body.
Node* exit_control_;
+ // Growth increment for the temporary buffer used to construct input lists to
+ // new nodes.
+ static const int kInputBufferSizeIncrement = 64;
+
+ Node** EnsureInputBufferSize(int size);
+
DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
};
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 8cd9a71..31256ec 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -118,7 +118,7 @@
void Print() { const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this); }
- GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
+ void PreEdge(Node* from, int index, Node* to);
private:
std::ostream& os_;
@@ -129,8 +129,7 @@
};
-GenericGraphVisit::Control JSONGraphEdgeWriter::PreEdge(Node* from, int index,
- Node* to) {
+void JSONGraphEdgeWriter::PreEdge(Node* from, int index, Node* to) {
if (first_edge_) {
first_edge_ = false;
} else {
@@ -152,7 +151,6 @@
}
os_ << "{\"source\":" << to->id() << ",\"target\":" << from->id()
<< ",\"index\":" << index << ",\"type\":\"" << edge_type << "\"}";
- return GenericGraphVisit::CONTINUE;
}
@@ -174,7 +172,6 @@
void Print();
GenericGraphVisit::Control Pre(Node* node);
- GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
private:
void AnnotateNode(Node* node);
@@ -221,17 +218,6 @@
}
-GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
- Node* to) {
- if (use_to_def_) return GenericGraphVisit::CONTINUE;
- // When going from def to use, only consider white -> other edges, which are
- // the dead nodes that use live nodes. We're probably not interested in
- // dead nodes that only use other dead nodes.
- if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE;
- return GenericGraphVisit::SKIP;
-}
-
-
static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
if (from->opcode() == IrOpcode::kPhi ||
from->opcode() == IrOpcode::kEffectPhi) {
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
index 19e9c7f..1de712e 100644
--- a/src/compiler/graph.cc
+++ b/src/compiler/graph.cc
@@ -11,6 +11,7 @@
#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/operator-properties-inl.h"
@@ -29,12 +30,14 @@
}
-Node* Graph::NewNode(
- const Operator* op, int input_count, Node** inputs, bool incomplete) {
+Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
+ bool incomplete) {
DCHECK_LE(op->InputCount(), input_count);
- Node* result = Node::New(this, input_count, inputs);
+ Node* result = Node::New(this, input_count, inputs, incomplete);
result->Initialize(op);
- if (!incomplete) Decorate(result);
+ if (!incomplete) {
+ Decorate(result);
+ }
return result;
}
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 87d8c18..d7098ae 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -793,7 +793,7 @@
frame->SetRegisterSaveAreaSize(register_save_area_size);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -1023,7 +1023,7 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
- if (!linkage()->info()->IsStub()) {
+ if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 4baa595..b8d2fca 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -1071,6 +1071,13 @@
UNIMPLEMENTED();
}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::Flag::kNoFlags;
+}
+
#endif // !V8_TURBOFAN_BACKEND
} // namespace compiler
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 9240b06..c16aa82 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -19,7 +19,7 @@
JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph)
: info_(info),
jsgraph_(jsgraph),
- linkage_(new (jsgraph->zone()) Linkage(info)) {}
+ linkage_(new (jsgraph->zone()) Linkage(jsgraph->zone(), info)) {}
void JSGenericLowering::PatchOperator(Node* node, const Operator* op) {
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index e3cecb5..4f74d6f 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -128,6 +128,8 @@
Node* NumberConstant(double value);
Factory* factory() { return isolate()->factory(); }
+
+ DISALLOW_COPY_AND_ASSIGN(JSGraph);
};
} // namespace compiler
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 1ce8173..4927ce5 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -195,7 +195,7 @@
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
// graphs.
-class JSOperatorBuilder FINAL {
+class JSOperatorBuilder FINAL : public ZoneObject {
public:
explicit JSOperatorBuilder(Zone* zone);
@@ -266,6 +266,8 @@
const JSOperatorBuilderImpl& impl_;
Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSOperatorBuilder);
};
} // namespace compiler
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 4239f9f..481b443 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -39,28 +39,30 @@
}
-Linkage::Linkage(CompilationInfo* info) : info_(info) {
+CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
if (info->function() != NULL) {
// If we already have the function literal, use the number of parameters
// plus the receiver.
- incoming_ = GetJSCallDescriptor(1 + info->function()->parameter_count());
- } else if (!info->closure().is_null()) {
+ return GetJSCallDescriptor(1 + info->function()->parameter_count(), zone);
+ }
+ if (!info->closure().is_null()) {
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
SharedFunctionInfo* shared = info->closure()->shared();
- incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count());
- } else if (info->code_stub() != NULL) {
+ return GetJSCallDescriptor(1 + shared->formal_parameter_count(), zone);
+ }
+ if (info->code_stub() != NULL) {
// Use the code stub interface descriptor.
CallInterfaceDescriptor descriptor =
info->code_stub()->GetCallInterfaceDescriptor();
- incoming_ = GetStubCallDescriptor(descriptor);
- } else {
- incoming_ = NULL; // TODO(titzer): ?
+ return GetStubCallDescriptor(descriptor, 0, CallDescriptor::kNoFlags, zone);
}
+ return NULL; // TODO(titzer): ?
}
-FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame, int extra) {
+FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
+ int extra) const {
if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
incoming_->kind() == CallDescriptor::kCallAddress) {
int offset;
@@ -87,24 +89,22 @@
}
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) {
- return GetJSCallDescriptor(parameter_count, this->info_->zone());
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) const {
+ return GetJSCallDescriptor(parameter_count, zone_);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return GetRuntimeCallDescriptor(function, parameter_count, properties,
- this->info_->zone());
+ Operator::Properties properties) const {
+ return GetRuntimeCallDescriptor(function, parameter_count, properties, zone_);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
CallInterfaceDescriptor descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags) {
- return GetStubCallDescriptor(descriptor, stack_parameter_count, flags,
- this->info_->zone());
+ CallDescriptor::Flags flags) const {
+ return GetStubCallDescriptor(descriptor, stack_parameter_count, flags, zone_);
}
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 63d9f1b..cc5be9b 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -129,16 +129,18 @@
private:
friend class Linkage;
- Kind kind_;
- MachineType target_type_;
- LinkageLocation target_loc_;
- MachineSignature* machine_sig_;
- LocationSignature* location_sig_;
- size_t js_param_count_;
- Operator::Properties properties_;
- RegList callee_saved_registers_;
- Flags flags_;
- const char* debug_name_;
+ const Kind kind_;
+ const MachineType target_type_;
+ const LinkageLocation target_loc_;
+ const MachineSignature* const machine_sig_;
+ const LocationSignature* const location_sig_;
+ const size_t js_param_count_;
+ const Operator::Properties properties_;
+ const RegList callee_saved_registers_;
+ const Flags flags_;
+ const char* const debug_name_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallDescriptor);
};
DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
@@ -161,25 +163,28 @@
// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
class Linkage : public ZoneObject {
public:
- explicit Linkage(CompilationInfo* info);
- explicit Linkage(CompilationInfo* info, CallDescriptor* incoming)
- : info_(info), incoming_(incoming) {}
+ Linkage(Zone* zone, CompilationInfo* info)
+ : zone_(zone), incoming_(ComputeIncoming(zone, info)) {}
+ Linkage(Zone* zone, CallDescriptor* incoming)
+ : zone_(zone), incoming_(incoming) {}
+
+ static CallDescriptor* ComputeIncoming(Zone* zone, CompilationInfo* info);
// The call descriptor for this compilation unit describes the locations
// of incoming parameters and the outgoing return value(s).
- CallDescriptor* GetIncomingDescriptor() { return incoming_; }
- CallDescriptor* GetJSCallDescriptor(int parameter_count);
+ CallDescriptor* GetIncomingDescriptor() const { return incoming_; }
+ CallDescriptor* GetJSCallDescriptor(int parameter_count) const;
static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
- CallDescriptor* GetRuntimeCallDescriptor(Runtime::FunctionId function,
- int parameter_count,
- Operator::Properties properties);
+ CallDescriptor* GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) const;
static CallDescriptor* GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
Operator::Properties properties, Zone* zone);
CallDescriptor* GetStubCallDescriptor(
CallInterfaceDescriptor descriptor, int stack_parameter_count = 0,
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags);
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags) const;
static CallDescriptor* GetStubCallDescriptor(
CallInterfaceDescriptor descriptor, int stack_parameter_count,
CallDescriptor::Flags flags, Zone* zone);
@@ -192,37 +197,37 @@
MachineSignature* sig);
// Get the location of an (incoming) parameter to this function.
- LinkageLocation GetParameterLocation(int index) {
+ LinkageLocation GetParameterLocation(int index) const {
return incoming_->GetInputLocation(index + 1); // + 1 to skip target.
}
// Get the machine type of an (incoming) parameter to this function.
- MachineType GetParameterType(int index) {
+ MachineType GetParameterType(int index) const {
return incoming_->GetInputType(index + 1); // + 1 to skip target.
}
// Get the location where this function should place its return value.
- LinkageLocation GetReturnLocation() {
+ LinkageLocation GetReturnLocation() const {
return incoming_->GetReturnLocation(0);
}
// Get the machine type of this function's return value.
- MachineType GetReturnType() { return incoming_->GetReturnType(0); }
+ MachineType GetReturnType() const { return incoming_->GetReturnType(0); }
// Get the frame offset for a given spill slot. The location depends on the
// calling convention and the specific frame layout, and may thus be
// architecture-specific. Negative spill slots indicate arguments on the
// caller's frame. The {extra} parameter indicates an additional offset from
// the frame offset, e.g. to index into part of a double slot.
- FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0);
-
- CompilationInfo* info() const { return info_; }
+ FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0) const;
static bool NeedsFrameState(Runtime::FunctionId function);
private:
- CompilationInfo* info_;
- CallDescriptor* incoming_;
+ Zone* const zone_;
+ CallDescriptor* const incoming_;
+
+ DISALLOW_COPY_AND_ASSIGN(Linkage);
};
} // namespace compiler
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 1951446..2c87189 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -56,7 +56,7 @@
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
-class MachineOperatorBuilder FINAL {
+class MachineOperatorBuilder FINAL : public ZoneObject {
public:
// Flags that specify which operations are available. This is useful
// for operations that are unsupported by some back-ends.
@@ -209,6 +209,7 @@
const MachineOperatorBuilderImpl& impl_;
const MachineType word_;
const Flags flags_;
+ DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 9609cf4..9a8b98a 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -709,7 +709,7 @@
__ MultiPush(saves);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -942,7 +942,7 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
- if (!linkage()->info()->IsStub()) {
+ if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/src/compiler/node.h b/src/compiler/node.h
index 7c99705..3a5afd2 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -48,8 +48,8 @@
// out-of-line indexed by the Node's id.
class Node FINAL : public GenericNode<NodeData, Node> {
public:
- Node(GenericGraphBase* graph, int input_count)
- : GenericNode<NodeData, Node>(graph, input_count) {}
+ Node(GenericGraphBase* graph, int input_count, int reserve_input_count)
+ : GenericNode<NodeData, Node>(graph, input_count, reserve_input_count) {}
void Initialize(const Operator* op) { set_op(op); }
diff --git a/src/compiler/operator-properties-inl.h b/src/compiler/operator-properties-inl.h
index 771f560..7c2ae16 100644
--- a/src/compiler/operator-properties-inl.h
+++ b/src/compiler/operator-properties-inl.h
@@ -117,6 +117,7 @@
}
inline int OperatorProperties::GetControlInputCount(const Operator* op) {
+ // TODO(titzer): fix this mess; just make them a count on the operator.
switch (op->opcode()) {
case IrOpcode::kPhi:
case IrOpcode::kEffectPhi:
@@ -127,8 +128,8 @@
#define OPCODE_CASE(x) case IrOpcode::k##x:
CONTROL_OP_LIST(OPCODE_CASE)
#undef OPCODE_CASE
- // Branch operator is special
if (op->opcode() == IrOpcode::kBranch) return 1;
+ if (op->opcode() == IrOpcode::kTerminate) return 1;
// Control operators are Operator1<int>.
return OpParameter<int>(op);
default:
diff --git a/src/compiler/pipeline-statistics.cc b/src/compiler/pipeline-statistics.cc
index 45408b5..e58c396 100644
--- a/src/compiler/pipeline-statistics.cc
+++ b/src/compiler/pipeline-statistics.cc
@@ -16,6 +16,10 @@
scope_.Reset(new ZonePool::StatsScope(pipeline_stats->zone_pool_));
timer_.Start();
outer_zone_initial_size_ = pipeline_stats->OuterZoneSize();
+ allocated_bytes_at_start_ =
+ outer_zone_initial_size_ -
+ pipeline_stats->total_stats_.outer_zone_initial_size_ +
+ pipeline_stats->zone_pool_->GetCurrentAllocatedBytes();
}
@@ -28,6 +32,8 @@
size_t outer_zone_diff =
pipeline_stats->OuterZoneSize() - outer_zone_initial_size_;
diff->max_allocated_bytes_ = outer_zone_diff + scope_->GetMaxAllocatedBytes();
+ diff->absolute_max_allocated_bytes_ =
+ diff->max_allocated_bytes_ + allocated_bytes_at_start_;
diff->total_allocated_bytes_ =
outer_zone_diff + scope_->GetTotalAllocatedBytes();
scope_.Reset(NULL);
diff --git a/src/compiler/pipeline-statistics.h b/src/compiler/pipeline-statistics.h
index 972a710..01cc9de 100644
--- a/src/compiler/pipeline-statistics.h
+++ b/src/compiler/pipeline-statistics.h
@@ -39,6 +39,7 @@
SmartPointer<ZonePool::StatsScope> scope_;
base::ElapsedTimer timer_;
size_t outer_zone_initial_size_;
+ size_t allocated_bytes_at_start_;
};
bool InPhaseKind() { return !phase_kind_stats_.scope_.is_empty(); }
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index a46686b..bf64f66 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -40,6 +40,128 @@
namespace internal {
namespace compiler {
+class PipelineData {
+ public:
+ explicit PipelineData(CompilationInfo* info, ZonePool* zone_pool,
+ PipelineStatistics* pipeline_statistics)
+ : isolate_(info->zone()->isolate()),
+ outer_zone_(info->zone()),
+ zone_pool_(zone_pool),
+ pipeline_statistics_(pipeline_statistics),
+ graph_zone_scope_(zone_pool_),
+ graph_zone_(graph_zone_scope_.zone()),
+ graph_(new (graph_zone()) Graph(graph_zone())),
+ source_positions_(new SourcePositionTable(graph())),
+ machine_(new (graph_zone()) MachineOperatorBuilder(
+ kMachPtr, InstructionSelector::SupportedMachineOperatorFlags())),
+ common_(new (graph_zone()) CommonOperatorBuilder(graph_zone())),
+ javascript_(new (graph_zone()) JSOperatorBuilder(graph_zone())),
+ jsgraph_(new (graph_zone())
+ JSGraph(graph(), common(), javascript(), machine())),
+ typer_(new Typer(graph(), info->context())),
+ schedule_(NULL),
+ instruction_zone_scope_(zone_pool_),
+ instruction_zone_(instruction_zone_scope_.zone()) {}
+
+ // For machine graph testing only.
+ PipelineData(Graph* graph, Schedule* schedule, ZonePool* zone_pool)
+ : isolate_(graph->zone()->isolate()),
+ outer_zone_(NULL),
+ zone_pool_(zone_pool),
+ pipeline_statistics_(NULL),
+ graph_zone_scope_(zone_pool_),
+ graph_zone_(NULL),
+ graph_(graph),
+ source_positions_(new SourcePositionTable(graph)),
+ machine_(NULL),
+ common_(NULL),
+ javascript_(NULL),
+ jsgraph_(NULL),
+ typer_(NULL),
+ schedule_(schedule),
+ instruction_zone_scope_(zone_pool_),
+ instruction_zone_(instruction_zone_scope_.zone()) {}
+
+ ~PipelineData() {
+ DeleteInstructionZone();
+ DeleteGraphZone();
+ }
+
+ Isolate* isolate() const { return isolate_; }
+ ZonePool* zone_pool() const { return zone_pool_; }
+ PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
+
+ Zone* graph_zone() const { return graph_zone_; }
+ Graph* graph() const { return graph_; }
+ SourcePositionTable* source_positions() const {
+ return source_positions_.get();
+ }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ JSOperatorBuilder* javascript() const { return javascript_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Typer* typer() const { return typer_.get(); }
+ Schedule* schedule() const { return schedule_; }
+ void set_schedule(Schedule* schedule) {
+ DCHECK_EQ(NULL, schedule_);
+ schedule_ = schedule;
+ }
+
+ Zone* instruction_zone() const { return instruction_zone_; }
+
+ void DeleteGraphZone() {
+ // Destroy objects with destructors first.
+ source_positions_.Reset(NULL);
+ typer_.Reset(NULL);
+ if (graph_zone_ == NULL) return;
+ // Destroy zone and clear pointers.
+ graph_zone_scope_.Destroy();
+ graph_zone_ = NULL;
+ graph_ = NULL;
+ machine_ = NULL;
+ common_ = NULL;
+ javascript_ = NULL;
+ jsgraph_ = NULL;
+ schedule_ = NULL;
+ }
+
+ void DeleteInstructionZone() {
+ if (instruction_zone_ == NULL) return;
+ instruction_zone_scope_.Destroy();
+ instruction_zone_ = NULL;
+ }
+
+ private:
+ Isolate* isolate_;
+ Zone* outer_zone_;
+ ZonePool* zone_pool_;
+ PipelineStatistics* pipeline_statistics_;
+
+ ZonePool::Scope graph_zone_scope_;
+ Zone* graph_zone_;
+ // All objects in the following group of fields are allocated in graph_zone_.
+ // They are all set to NULL when the graph_zone_ is destroyed.
+ Graph* graph_;
+ // TODO(dcarney): make this into a ZoneObject.
+ SmartPointer<SourcePositionTable> source_positions_;
+ MachineOperatorBuilder* machine_;
+ CommonOperatorBuilder* common_;
+ JSOperatorBuilder* javascript_;
+ JSGraph* jsgraph_;
+ // TODO(dcarney): make this into a ZoneObject.
+ SmartPointer<Typer> typer_;
+ Schedule* schedule_;
+
+ // All objects in the following group of fields are allocated in
+ // instruction_zone_. They are all set to NULL when the instruction_zone_ is
+ // destroyed.
+ ZonePool::Scope instruction_zone_scope_;
+ Zone* instruction_zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(PipelineData);
+};
+
+
static inline bool VerifyGraphs() {
#ifdef DEBUG
return true;
@@ -157,7 +279,7 @@
SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
pipeline_statistics.Reset(new PipelineStatistics(info(), &zone_pool));
- pipeline_statistics->BeginPhaseKind("create graph");
+ pipeline_statistics->BeginPhaseKind("graph creation");
}
if (FLAG_trace_turbo) {
@@ -170,32 +292,24 @@
tcf << AsC1VCompilation(info());
}
- // Build the graph.
- Graph graph(zone());
- SourcePositionTable source_positions(&graph);
- source_positions.AddDecorator();
- // TODO(turbofan): there is no need to type anything during initial graph
- // construction. This is currently only needed for the node cache, which the
- // typer could sweep over later.
- Typer typer(&graph, info()->context());
- MachineOperatorBuilder machine(
- kMachPtr, InstructionSelector::SupportedMachineOperatorFlags());
- CommonOperatorBuilder common(zone());
- JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(&graph, &common, &javascript, &machine);
+ // Initialize the graph and builders.
+ PipelineData data(info(), &zone_pool, pipeline_statistics.get());
+
+ data.source_positions()->AddDecorator();
+
Node* context_node;
{
PhaseScope phase_scope(pipeline_statistics.get(), "graph builder");
- ZonePool::Scope zone_scope(&zone_pool);
- AstGraphBuilderWithPositions graph_builder(zone_scope.zone(), info(),
- &jsgraph, &source_positions);
+ ZonePool::Scope zone_scope(data.zone_pool());
+ AstGraphBuilderWithPositions graph_builder(
+ zone_scope.zone(), info(), data.jsgraph(), data.source_positions());
graph_builder.CreateGraph();
context_node = graph_builder.GetFunctionContext();
}
{
PhaseScope phase_scope(pipeline_statistics.get(), "phi reduction");
PhiReducer phi_reducer;
- GraphReducer graph_reducer(&graph);
+ GraphReducer graph_reducer(data.graph());
graph_reducer.AddReducer(&phi_reducer);
graph_reducer.ReduceGraph();
// TODO(mstarzinger): Running reducer once ought to be enough for everyone.
@@ -203,30 +317,30 @@
graph_reducer.ReduceGraph();
}
- VerifyAndPrintGraph(&graph, "Initial untyped", true);
+ VerifyAndPrintGraph(data.graph(), "Initial untyped", true);
if (info()->is_context_specializing()) {
- SourcePositionTable::Scope pos(&source_positions,
+ SourcePositionTable::Scope pos(data.source_positions(),
SourcePosition::Unknown());
// Specialize the code to the context as aggressively as possible.
- JSContextSpecializer spec(info(), &jsgraph, context_node);
+ JSContextSpecializer spec(info(), data.jsgraph(), context_node);
spec.SpecializeToContext();
- VerifyAndPrintGraph(&graph, "Context specialized", true);
+ VerifyAndPrintGraph(data.graph(), "Context specialized", true);
}
if (info()->is_inlining_enabled()) {
PhaseScope phase_scope(pipeline_statistics.get(), "inlining");
- SourcePositionTable::Scope pos(&source_positions,
+ SourcePositionTable::Scope pos(data.source_positions(),
SourcePosition::Unknown());
- ZonePool::Scope zone_scope(&zone_pool);
- JSInliner inliner(zone_scope.zone(), info(), &jsgraph);
+ ZonePool::Scope zone_scope(data.zone_pool());
+ JSInliner inliner(zone_scope.zone(), info(), data.jsgraph());
inliner.Inline();
- VerifyAndPrintGraph(&graph, "Inlined", true);
+ VerifyAndPrintGraph(data.graph(), "Inlined", true);
}
// Print a replay of the initial graph.
if (FLAG_print_turbo_replay) {
- GraphReplayPrinter::PrintReplay(&graph);
+ GraphReplayPrinter::PrintReplay(data.graph());
}
// Bailout here in case target architecture is not supported.
@@ -236,8 +350,8 @@
{
// Type the graph.
PhaseScope phase_scope(pipeline_statistics.get(), "typer");
- typer.Run();
- VerifyAndPrintGraph(&graph, "Typed");
+ data.typer()->Run();
+ VerifyAndPrintGraph(data.graph(), "Typed");
}
}
@@ -249,46 +363,46 @@
{
// Lower JSOperators where we can determine types.
PhaseScope phase_scope(pipeline_statistics.get(), "typed lowering");
- SourcePositionTable::Scope pos(&source_positions,
+ SourcePositionTable::Scope pos(data.source_positions(),
SourcePosition::Unknown());
- ValueNumberingReducer vn_reducer(zone());
- JSTypedLowering lowering(&jsgraph);
- SimplifiedOperatorReducer simple_reducer(&jsgraph);
- GraphReducer graph_reducer(&graph);
+ ValueNumberingReducer vn_reducer(data.graph_zone());
+ JSTypedLowering lowering(data.jsgraph());
+ SimplifiedOperatorReducer simple_reducer(data.jsgraph());
+ GraphReducer graph_reducer(data.graph());
graph_reducer.AddReducer(&vn_reducer);
graph_reducer.AddReducer(&lowering);
graph_reducer.AddReducer(&simple_reducer);
graph_reducer.ReduceGraph();
- VerifyAndPrintGraph(&graph, "Lowered typed");
+ VerifyAndPrintGraph(data.graph(), "Lowered typed");
}
{
// Lower simplified operators and insert changes.
PhaseScope phase_scope(pipeline_statistics.get(), "simplified lowering");
- SourcePositionTable::Scope pos(&source_positions,
+ SourcePositionTable::Scope pos(data.source_positions(),
SourcePosition::Unknown());
- SimplifiedLowering lowering(&jsgraph);
+ SimplifiedLowering lowering(data.jsgraph());
lowering.LowerAllNodes();
- ValueNumberingReducer vn_reducer(zone());
- SimplifiedOperatorReducer simple_reducer(&jsgraph);
- GraphReducer graph_reducer(&graph);
+ ValueNumberingReducer vn_reducer(data.graph_zone());
+ SimplifiedOperatorReducer simple_reducer(data.jsgraph());
+ GraphReducer graph_reducer(data.graph());
graph_reducer.AddReducer(&vn_reducer);
graph_reducer.AddReducer(&simple_reducer);
graph_reducer.ReduceGraph();
- VerifyAndPrintGraph(&graph, "Lowered simplified");
+ VerifyAndPrintGraph(data.graph(), "Lowered simplified");
}
{
// Lower changes that have been inserted before.
PhaseScope phase_scope(pipeline_statistics.get(), "change lowering");
- SourcePositionTable::Scope pos(&source_positions,
+ SourcePositionTable::Scope pos(data.source_positions(),
SourcePosition::Unknown());
- Linkage linkage(info());
- ValueNumberingReducer vn_reducer(zone());
- SimplifiedOperatorReducer simple_reducer(&jsgraph);
- ChangeLowering lowering(&jsgraph, &linkage);
- MachineOperatorReducer mach_reducer(&jsgraph);
- GraphReducer graph_reducer(&graph);
+ Linkage linkage(data.graph_zone(), info());
+ ValueNumberingReducer vn_reducer(data.graph_zone());
+ SimplifiedOperatorReducer simple_reducer(data.jsgraph());
+ ChangeLowering lowering(data.jsgraph(), &linkage);
+ MachineOperatorReducer mach_reducer(data.jsgraph());
+ GraphReducer graph_reducer(data.graph());
// TODO(titzer): Figure out if we should run all reducers at once here.
graph_reducer.AddReducer(&vn_reducer);
graph_reducer.AddReducer(&simple_reducer);
@@ -297,53 +411,49 @@
graph_reducer.ReduceGraph();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
- VerifyAndPrintGraph(&graph, "Lowered changes", true);
+ VerifyAndPrintGraph(data.graph(), "Lowered changes", true);
}
{
PhaseScope phase_scope(pipeline_statistics.get(), "control reduction");
- SourcePositionTable::Scope pos(&source_positions,
+ SourcePositionTable::Scope pos(data.source_positions(),
SourcePosition::Unknown());
- ZonePool::Scope zone_scope(&zone_pool);
- ControlReducer::ReduceGraph(zone_scope.zone(), &jsgraph, &common);
+ ZonePool::Scope zone_scope(data.zone_pool());
+ ControlReducer::ReduceGraph(zone_scope.zone(), data.jsgraph(),
+ data.common());
- VerifyAndPrintGraph(&graph, "Control reduced");
+ VerifyAndPrintGraph(data.graph(), "Control reduced");
}
}
{
// Lower any remaining generic JSOperators.
PhaseScope phase_scope(pipeline_statistics.get(), "generic lowering");
- SourcePositionTable::Scope pos(&source_positions,
+ SourcePositionTable::Scope pos(data.source_positions(),
SourcePosition::Unknown());
- JSGenericLowering lowering(info(), &jsgraph);
- GraphReducer graph_reducer(&graph);
+ JSGenericLowering lowering(info(), data.jsgraph());
+ GraphReducer graph_reducer(data.graph());
graph_reducer.AddReducer(&lowering);
graph_reducer.ReduceGraph();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
- VerifyAndPrintGraph(&graph, "Lowered generic", true);
+ VerifyAndPrintGraph(data.graph(), "Lowered generic", true);
}
if (!pipeline_statistics.is_empty()) {
- pipeline_statistics->BeginPhaseKind("code generation");
+ pipeline_statistics->BeginPhaseKind("block building");
}
- source_positions.RemoveDecorator();
+ data.source_positions()->RemoveDecorator();
- Schedule* schedule;
- {
- PhaseScope phase_scope(pipeline_statistics.get(), "scheduling");
- // Compute a schedule.
- schedule = ComputeSchedule(&zone_pool, &graph);
- }
+ // Compute a schedule.
+ ComputeSchedule(&data);
Handle<Code> code = Handle<Code>::null();
{
// Generate optimized code.
- Linkage linkage(info());
- code = GenerateCode(pipeline_statistics.get(), &zone_pool, &linkage, &graph,
- schedule, &source_positions);
+ Linkage linkage(data.instruction_zone(), info());
+ code = GenerateCode(&linkage, &data);
info()->SetCode(code);
}
@@ -362,11 +472,13 @@
}
-Schedule* Pipeline::ComputeSchedule(ZonePool* zone_pool, Graph* graph) {
- Schedule* schedule = Scheduler::ComputeSchedule(zone_pool, graph);
+void Pipeline::ComputeSchedule(PipelineData* data) {
+ PhaseScope phase_scope(data->pipeline_statistics(), "scheduling");
+ Schedule* schedule =
+ Scheduler::ComputeSchedule(data->zone_pool(), data->graph());
TraceSchedule(schedule);
if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
- return schedule;
+ data->set_schedule(schedule);
}
@@ -375,16 +487,16 @@
Schedule* schedule) {
ZonePool zone_pool(isolate());
CHECK(SupportedBackend());
+ PipelineData data(graph, schedule, &zone_pool);
if (schedule == NULL) {
// TODO(rossberg): Should this really be untyped?
VerifyAndPrintGraph(graph, "Machine", true);
- schedule = ComputeSchedule(&zone_pool, graph);
+ ComputeSchedule(&data);
+ } else {
+ TraceSchedule(schedule);
}
- TraceSchedule(schedule);
- SourcePositionTable source_positions(graph);
- Handle<Code> code = GenerateCode(NULL, &zone_pool, linkage, graph, schedule,
- &source_positions);
+ Handle<Code> code = GenerateCode(linkage, &data);
#if ENABLE_DISASSEMBLER
if (!code.is_null() && FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
@@ -396,29 +508,27 @@
}
-Handle<Code> Pipeline::GenerateCode(PipelineStatistics* pipeline_statistics,
- ZonePool* zone_pool, Linkage* linkage,
- Graph* graph, Schedule* schedule,
- SourcePositionTable* source_positions) {
- DCHECK_NOT_NULL(graph);
+Handle<Code> Pipeline::GenerateCode(Linkage* linkage, PipelineData* data) {
DCHECK_NOT_NULL(linkage);
- DCHECK_NOT_NULL(schedule);
+ DCHECK_NOT_NULL(data->graph());
+ DCHECK_NOT_NULL(data->schedule());
CHECK(SupportedBackend());
BasicBlockProfiler::Data* profiler_data = NULL;
if (FLAG_turbo_profiling) {
- profiler_data = BasicBlockInstrumentor::Instrument(info_, graph, schedule);
+ profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
+ data->schedule());
}
- Zone* instruction_zone = schedule->zone();
- InstructionSequence sequence(instruction_zone, graph, schedule);
+ InstructionSequence sequence(data->instruction_zone(), data->graph(),
+ data->schedule());
// Select and schedule instructions covering the scheduled graph.
{
- PhaseScope phase_scope(pipeline_statistics, "select instructions");
- ZonePool::Scope zone_scope(zone_pool);
+ PhaseScope phase_scope(data->pipeline_statistics(), "select instructions");
+ ZonePool::Scope zone_scope(data->zone_pool());
InstructionSelector selector(zone_scope.zone(), linkage, &sequence,
- schedule, source_positions);
+ data->schedule(), data->source_positions());
selector.SelectInstructions();
}
@@ -427,22 +537,28 @@
os << "----- Instruction sequence before register allocation -----\n"
<< sequence;
TurboCfgFile tcf(isolate());
- tcf << AsC1V("CodeGen", schedule, source_positions, &sequence);
+ tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
+ &sequence);
+ }
+
+ data->DeleteGraphZone();
+
+ if (data->pipeline_statistics() != NULL) {
+ data->pipeline_statistics()->BeginPhaseKind("register allocation");
}
// Allocate registers.
Frame frame;
{
- int node_count = graph->NodeCount();
+ int node_count = sequence.VirtualRegisterCount();
if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
- linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersForValues);
+ info()->AbortOptimization(kNotEnoughVirtualRegistersForValues);
return Handle<Code>::null();
}
- ZonePool::Scope zone_scope(zone_pool);
- RegisterAllocator allocator(zone_scope.zone(), &frame, linkage->info(),
- &sequence);
- if (!allocator.Allocate(pipeline_statistics)) {
- linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+ ZonePool::Scope zone_scope(data->zone_pool());
+ RegisterAllocator allocator(zone_scope.zone(), &frame, info(), &sequence);
+ if (!allocator.Allocate(data->pipeline_statistics())) {
+ info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return Handle<Code>::null();
}
if (FLAG_trace_turbo) {
@@ -457,11 +573,15 @@
<< sequence;
}
+ if (data->pipeline_statistics() != NULL) {
+ data->pipeline_statistics()->BeginPhaseKind("code generation");
+ }
+
// Generate native sequence.
Handle<Code> code;
{
- PhaseScope phase_scope(pipeline_statistics, "generate code");
- CodeGenerator generator(&frame, linkage, &sequence);
+ PhaseScope phase_scope(data->pipeline_statistics(), "generate code");
+ CodeGenerator generator(&frame, linkage, &sequence, info());
code = generator.GenerateCode();
}
if (profiler_data != NULL) {
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index f81854e..7811300 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -18,13 +18,9 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
class Graph;
-class InstructionSequence;
class Linkage;
-class PipelineStatistics;
-class RegisterAllocator;
+class PipelineData;
class Schedule;
-class SourcePositionTable;
-class ZonePool;
class Pipeline {
public:
@@ -49,15 +45,11 @@
CompilationInfo* info() const { return info_; }
Isolate* isolate() { return info_->isolate(); }
- Zone* zone() { return info_->zone(); }
- Schedule* ComputeSchedule(ZonePool* zone_pool, Graph* graph);
+ void ComputeSchedule(PipelineData* data);
void VerifyAndPrintGraph(Graph* graph, const char* phase,
bool untyped = false);
- Handle<Code> GenerateCode(PipelineStatistics* pipeline_statistics,
- ZonePool* zone_pool, Linkage* linkage, Graph* graph,
- Schedule* schedule,
- SourcePositionTable* source_positions);
+ Handle<Code> GenerateCode(Linkage* linkage, PipelineData* data);
};
}
}
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 39d44f3..ba545c1 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -151,10 +151,10 @@
Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
- Node** inputs) {
+ Node** inputs, bool incomplete) {
DCHECK(ScheduleValid());
DCHECK(current_block_ != NULL);
- Node* node = graph()->NewNode(op, input_count, inputs);
+ Node* node = graph()->NewNode(op, input_count, inputs, incomplete);
BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start()
: CurrentBlock();
schedule()->AddNode(block, node);
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 35f884f..dfe83fa 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -416,8 +416,8 @@
Schedule* Export();
protected:
- virtual Node* MakeNode(const Operator* op, int input_count,
- Node** inputs) FINAL;
+ virtual Node* MakeNode(const Operator* op, int input_count, Node** inputs,
+ bool incomplete) FINAL;
bool ScheduleValid() { return schedule_ != NULL; }
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index e6076ce..7974182 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -277,6 +277,8 @@
BasicBlockVector rpo_order_; // Reverse-post-order block list.
BasicBlock* start_;
BasicBlock* end_;
+
+ DISALLOW_COPY_AND_ASSIGN(Schedule);
};
std::ostream& operator<<(std::ostream& os, const Schedule& s);
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index a75a9ef..346e072 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -382,7 +382,8 @@
}
bool IsFinalMerge(Node* node) {
- return (node == scheduler_->graph_->end()->InputAt(0));
+ return (node->opcode() == IrOpcode::kMerge &&
+ node == scheduler_->graph_->end()->InputAt(0));
}
};
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 994beb8..2cb0f51 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -71,6 +71,7 @@
ZoneVector<Handle<Object> > weaken_min_limits_;
ZoneVector<Handle<Object> > weaken_max_limits_;
+ DISALLOW_COPY_AND_ASSIGN(Typer);
};
}
}
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index f8f0e50..de97103 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -766,7 +766,7 @@
frame()->SetRegisterSaveAreaSize(register_save_area_size);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -992,7 +992,7 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
- if (!linkage()->info()->IsStub()) {
+ if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index 71e006c..7032415 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -124,7 +124,7 @@
printf("%s\n", *text_str);
}
running = response_details->Get(String::NewFromUtf8(isolate, "running"))
- ->ToBoolean()
+ ->ToBoolean(isolate)
->Value();
}
}
diff --git a/src/d8.cc b/src/d8.cc
index 85d1048..fb24bcc 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -186,7 +186,7 @@
int name_length = 0;
uint16_t* name_buffer = NULL;
if (name->IsString()) {
- Local<String> name_string = name->ToString();
+ Local<String> name_string = Local<String>::Cast(name);
name_length = name_string->Length();
name_buffer = new uint16_t[name_length];
name_string->Write(name_buffer, 0, name_length);
@@ -410,7 +410,7 @@
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- int index = data->RealmFind(args[0]->ToObject()->CreationContext());
+ int index = data->RealmFind(args[0]->ToObject(isolate)->CreationContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -480,7 +480,7 @@
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- ScriptCompiler::Source script_source(args[1]->ToString());
+ ScriptCompiler::Source script_source(args[1]->ToString(isolate));
Handle<UnboundScript> script = ScriptCompiler::CompileUnbound(
isolate, &script_source);
if (script.IsEmpty()) return;
@@ -526,7 +526,7 @@
// Explicitly catch potential exceptions in toString().
v8::TryCatch try_catch;
- Handle<String> str_obj = args[i]->ToString();
+ Handle<String> str_obj = args[i]->ToString(args.GetIsolate());
if (try_catch.HasCaught()) {
try_catch.ReThrow();
return;
diff --git a/src/extensions/statistics-extension.cc b/src/extensions/statistics-extension.cc
index 6f63245..bb5ee33 100644
--- a/src/extensions/statistics-extension.cc
+++ b/src/extensions/statistics-extension.cc
@@ -53,7 +53,8 @@
Heap* heap = isolate->heap();
if (args.Length() > 0) { // GC if first argument evaluates to true.
- if (args[0]->IsBoolean() && args[0]->ToBoolean()->Value()) {
+ if (args[0]->IsBoolean() &&
+ args[0]->ToBoolean(args.GetIsolate())->Value()) {
heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
}
}
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index f7899c9..f309eae 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2521,14 +2521,13 @@
// Note that we never eliminate a transition array, though we might right-trim
// such that number_of_transitions() == 0. If this assumption changes,
- // TransitionArray::Insert() will need to deal with the case that a transition
- // array disappeared during GC.
- int trim = t->number_of_transitions_storage() - transition_index;
+ // TransitionArray::CopyInsert() will need to deal with the case that a
+ // transition array disappeared during GC.
+ int trim = t->number_of_transitions() - transition_index;
if (trim > 0) {
heap_->RightTrimFixedArray<Heap::FROM_GC>(
t, t->IsSimpleTransition() ? trim
: trim * TransitionArray::kTransitionSize);
- t->SetNumberOfTransitions(transition_index);
}
DCHECK(map->HasTransitionArray());
}
diff --git a/src/mips64/builtins-mips64.cc b/src/mips64/builtins-mips64.cc
index 51a1265..e5f733c 100644
--- a/src/mips64/builtins-mips64.cc
+++ b/src/mips64/builtins-mips64.cc
@@ -1044,7 +1044,7 @@
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ Uld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index d398f6f..466906a 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -3091,7 +3091,7 @@
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// v0 = exception, a1 = code object, a2 = state.
- Uld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
+ ld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
dsrl(a2, a2, StackHandler::kKindWidth); // Handler index.
dsll(a2, a2, kPointerSizeLog2);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 10aaabf..c00ecdd 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2205,7 +2205,7 @@
}
-Object* FixedArray::get(int index) {
+Object* FixedArray::get(int index) const {
SLOW_DCHECK(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
@@ -5203,8 +5203,9 @@
bool Map::CanHaveMoreTransitions() {
if (!HasTransitionArray()) return true;
- return transitions()->number_of_transitions() <=
- TransitionArray::kMaxNumberOfTransitions;
+ return FixedArray::SizeFor(transitions()->length() +
+ TransitionArray::kTransitionSize)
+ <= Page::kMaxRegularHeapObjectSize;
}
@@ -6994,14 +6995,6 @@
}
-int Map::SlackForArraySize(int old_size, int size_limit) {
- const int max_slack = size_limit - old_size;
- DCHECK(max_slack >= 0);
- if (old_size < 4) return Min(max_slack, 1);
- return Min(max_slack, old_size / 2);
-}
-
-
void JSArray::EnsureSize(Handle<JSArray> array, int required_size) {
DCHECK(array->HasFastSmiOrObjectElements());
Handle<FixedArray> elts = handle(FixedArray::cast(array->elements()));
diff --git a/src/objects.cc b/src/objects.cc
index 81eec5e..3a4b9c9 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -6584,8 +6584,7 @@
if (old_size == 0) {
descriptors = DescriptorArray::Allocate(map->GetIsolate(), 0, 1);
} else {
- EnsureDescriptorSlack(
- map, SlackForArraySize(old_size, kMaxNumberOfDescriptors));
+ EnsureDescriptorSlack(map, old_size < 4 ? 1 : old_size / 2);
descriptors = handle(map->instance_descriptors());
}
}
@@ -6610,11 +6609,8 @@
DCHECK(child->is_prototype_map());
} else {
Handle<TransitionArray> transitions =
- TransitionArray::Insert(parent, name, child, flag);
- if (!parent->HasTransitionArray() ||
- *transitions != parent->transitions()) {
- parent->set_transitions(*transitions);
- }
+ TransitionArray::CopyInsert(parent, name, child, flag);
+ parent->set_transitions(*transitions);
child->SetBackPointer(*parent);
}
}
diff --git a/src/objects.h b/src/objects.h
index 63c8d99..fbe06c7 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2415,7 +2415,7 @@
class FixedArray: public FixedArrayBase {
public:
// Setter and getter for elements.
- inline Object* get(int index);
+ inline Object* get(int index) const;
static inline Handle<Object> get(Handle<FixedArray> array, int index);
// Setter that uses write barrier.
inline void set(int index, Object* value);
@@ -5380,8 +5380,7 @@
static const int kMaxLoopNestingMarker = 6;
// Layout description.
- static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
- static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
+ static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
static const int kDeoptimizationDataOffset =
kHandlerTableOffset + kPointerSize;
@@ -5390,8 +5389,8 @@
kDeoptimizationDataOffset + kPointerSize;
static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
- static const int kICAgeOffset =
- kGCMetadataOffset + kPointerSize;
+ static const int kInstructionSizeOffset = kGCMetadataOffset + kPointerSize;
+ static const int kICAgeOffset = kInstructionSizeOffset + kIntSize;
static const int kFlagsOffset = kICAgeOffset + kIntSize;
static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlags2Offset =
@@ -6119,8 +6118,6 @@
static void AppendCallbackDescriptors(Handle<Map> map,
Handle<Object> descriptors);
- static inline int SlackForArraySize(int old_size, int size_limit);
-
static void EnsureDescriptorSlack(Handle<Map> map, int slack);
// Returns the found code or undefined if absent.
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index 6ed86a1..cc4d4b1 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -160,15 +160,6 @@
}
-void TransitionArray::SetNumberOfTransitions(int number_of_transitions) {
- if (IsFullTransitionArray()) {
- DCHECK(number_of_transitions <= number_of_transitions_storage());
- WRITE_FIELD(this, kTransitionLengthOffset,
- Smi::FromInt(number_of_transitions));
- }
-}
-
-
#undef FIELD_ADDR
#undef WRITE_FIELD
#undef CONDITIONAL_WRITE_BARRIER
diff --git a/src/transitions.cc b/src/transitions.cc
index 5e6d410..96ed870 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -13,12 +13,10 @@
Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
- int number_of_transitions,
- int slack) {
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(
- LengthFor(number_of_transitions + slack));
+ int number_of_transitions) {
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(ToKeyIndex(number_of_transitions));
array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
- array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
return Handle<TransitionArray>::cast(array);
}
@@ -76,7 +74,6 @@
if (new_nof != nof) {
DCHECK(new_nof == 0);
result->Shrink(ToKeyIndex(0));
- result->SetNumberOfTransitions(0);
} else if (nof == 1) {
result->NoIncrementalWriteBarrierCopyFrom(
containing_map->transitions(), kSimpleTransitionIndex, 0);
@@ -88,47 +85,21 @@
}
-Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
- Handle<Name> name,
- Handle<Map> target,
- SimpleTransitionFlag flag) {
+Handle<TransitionArray> TransitionArray::CopyInsert(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Map> target,
+ SimpleTransitionFlag flag) {
if (!map->HasTransitionArray()) {
return TransitionArray::NewWith(map, name, target, flag);
}
int number_of_transitions = map->transitions()->number_of_transitions();
- int new_nof = number_of_transitions;
+ int new_size = number_of_transitions;
int insertion_index = map->transitions()->Search(*name);
- if (insertion_index == kNotFound) ++new_nof;
- DCHECK(new_nof <= kMaxNumberOfTransitions);
+ if (insertion_index == kNotFound) ++new_size;
- if (new_nof <= map->transitions()->number_of_transitions_storage()) {
- DisallowHeapAllocation no_gc;
- TransitionArray* array = map->transitions();
-
- if (insertion_index != kNotFound) {
- array->SetTarget(insertion_index, *target);
- return handle(array);
- }
-
- array->SetNumberOfTransitions(new_nof);
- uint32_t hash = name->Hash();
- for (insertion_index = number_of_transitions; insertion_index > 0;
- --insertion_index) {
- Name* key = array->GetKey(insertion_index - 1);
- if (key->Hash() <= hash) break;
- array->SetKey(insertion_index, key);
- array->SetTarget(insertion_index, array->GetTarget(insertion_index - 1));
- }
- array->SetKey(insertion_index, *name);
- array->SetTarget(insertion_index, *target);
- return handle(array);
- }
-
- Handle<TransitionArray> result = Allocate(
- map->GetIsolate(), new_nof,
- Map::SlackForArraySize(number_of_transitions, kMaxNumberOfTransitions));
+ Handle<TransitionArray> result = Allocate(map->GetIsolate(), new_size);
// The map's transition array may grown smaller during the allocation above as
// it was weakly traversed, though it is guaranteed not to disappear. Trim the
@@ -140,19 +111,29 @@
DCHECK(array->number_of_transitions() < number_of_transitions);
number_of_transitions = array->number_of_transitions();
- new_nof = number_of_transitions;
+ new_size = number_of_transitions;
insertion_index = array->Search(*name);
- if (insertion_index == kNotFound) ++new_nof;
+ if (insertion_index == kNotFound) ++new_size;
- result->Shrink(ToKeyIndex(new_nof));
- result->SetNumberOfTransitions(new_nof);
+ result->Shrink(ToKeyIndex(new_size));
}
if (array->HasPrototypeTransitions()) {
result->SetPrototypeTransitions(array->GetPrototypeTransitions());
}
+ if (insertion_index != kNotFound) {
+ for (int i = 0; i < number_of_transitions; ++i) {
+ if (i != insertion_index) {
+ result->NoIncrementalWriteBarrierCopyFrom(array, i, i);
+ }
+ }
+ result->NoIncrementalWriteBarrierSet(insertion_index, *name, *target);
+ result->set_back_pointer_storage(array->back_pointer_storage());
+ return result;
+ }
+
insertion_index = 0;
for (; insertion_index < number_of_transitions; ++insertion_index) {
if (InsertionPointFound(array->GetKey(insertion_index), *name)) break;
diff --git a/src/transitions.h b/src/transitions.h
index b7e4ebe..aa9b7b8 100644
--- a/src/transitions.h
+++ b/src/transitions.h
@@ -30,9 +30,8 @@
// The full format is:
// [0] Undefined or back pointer map
// [1] Smi(0) or fixed array of prototype transitions
-// [2] Number of transitions
-// [3] First transition
-// [3 + number of transitions * kTransitionSize]: start of slack
+// [2] First transition
+// [length() - kTransitionSize] Last transition
class TransitionArray: public FixedArray {
public:
// Accessors for fetching instance transition at transition number.
@@ -68,21 +67,10 @@
// Returns the number of transitions in the array.
int number_of_transitions() {
if (IsSimpleTransition()) return 1;
- if (length() <= kFirstIndex) return 0;
- return Smi::cast(get(kTransitionLengthIndex))->value();
+ int len = length();
+ return len <= kFirstIndex ? 0 : (len - kFirstIndex) / kTransitionSize;
}
- int number_of_transitions_storage() {
- if (IsSimpleTransition()) return 1;
- if (length() <= kFirstIndex) return 0;
- return (length() - kFirstIndex) / kTransitionSize;
- }
-
- int NumberOfSlackTransitions() {
- return number_of_transitions_storage() - number_of_transitions();
- }
-
- inline void SetNumberOfTransitions(int number_of_transitions);
inline int number_of_entries() { return number_of_transitions(); }
// Creates a FullTransitionArray from a SimpleTransitionArray in
@@ -90,22 +78,21 @@
static Handle<TransitionArray> ExtendToFullTransitionArray(
Handle<Map> containing_map);
- // Return a transition array, using the array from the owning map if it
- // already has one (copying into a larger array if necessary), otherwise
- // creating a new one according to flag.
+ // Create a transition array, copying from the owning map if it already has
+ // one, otherwise creating a new one according to flag.
// TODO(verwaest): This should not cause an existing transition to be
// overwritten.
- static Handle<TransitionArray> Insert(Handle<Map> map, Handle<Name> name,
- Handle<Map> target,
- SimpleTransitionFlag flag);
+ static Handle<TransitionArray> CopyInsert(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Map> target,
+ SimpleTransitionFlag flag);
// Search a transition for a given property name.
inline int Search(Name* name);
// Allocates a TransitionArray.
- static Handle<TransitionArray> Allocate(Isolate* isolate,
- int number_of_transitions,
- int slack = 0);
+ static Handle<TransitionArray> Allocate(
+ Isolate* isolate, int number_of_transitions);
bool IsSimpleTransition() {
return length() == kSimpleTransitionSize &&
@@ -133,8 +120,7 @@
// Layout for full transition arrays.
static const int kPrototypeTransitionsIndex = 1;
- static const int kTransitionLengthIndex = 2;
- static const int kFirstIndex = 3;
+ static const int kFirstIndex = 2;
// Layout for simple transition arrays.
static const int kSimpleTransitionTarget = 1;
@@ -147,8 +133,6 @@
// Layout for the full transition array header.
static const int kPrototypeTransitionsOffset = kBackPointerStorageOffset +
kPointerSize;
- static const int kTransitionLengthOffset =
- kPrototypeTransitionsOffset + kPointerSize;
// Layout of map transition entries in full transition arrays.
static const int kTransitionKey = 0;
@@ -173,12 +157,6 @@
// fit in a page).
static const int kMaxNumberOfTransitions = 1024 + 512;
- // Returns the fixed array length required to hold number_of_transitions
- // transitions.
- static int LengthFor(int number_of_transitions) {
- return ToKeyIndex(number_of_transitions);
- }
-
private:
// Conversion from transition number to array indices.
static int ToKeyIndex(int transition_number) {
diff --git a/src/type-feedback-vector-inl.h b/src/type-feedback-vector-inl.h
index 477a528..612004e 100644
--- a/src/type-feedback-vector-inl.h
+++ b/src/type-feedback-vector-inl.h
@@ -10,6 +10,11 @@
namespace v8 {
namespace internal {
+int TypeFeedbackVector::ic_metadata_length() const {
+ return FLAG_vector_ics ? VectorICComputer::word_count(ICSlots()) : 0;
+}
+
+
Handle<Object> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->uninitialized_symbol();
}
diff --git a/src/type-feedback-vector.cc b/src/type-feedback-vector.cc
index dcae7c7..c0078c5 100644
--- a/src/type-feedback-vector.cc
+++ b/src/type-feedback-vector.cc
@@ -12,10 +12,75 @@
namespace internal {
// static
+TypeFeedbackVector::VectorICKind TypeFeedbackVector::FromCodeKind(
+ Code::Kind kind) {
+ switch (kind) {
+ case Code::CALL_IC:
+ return KindCallIC;
+ case Code::LOAD_IC:
+ return KindLoadIC;
+ case Code::KEYED_LOAD_IC:
+ return KindKeyedLoadIC;
+ default:
+ // Shouldn't get here.
+ UNREACHABLE();
+ }
+
+ return KindUnused;
+}
+
+
+// static
+Code::Kind TypeFeedbackVector::FromVectorICKind(VectorICKind kind) {
+ switch (kind) {
+ case KindCallIC:
+ return Code::CALL_IC;
+ case KindLoadIC:
+ return Code::LOAD_IC;
+ case KindKeyedLoadIC:
+ return Code::KEYED_LOAD_IC;
+ case KindUnused:
+ break;
+ }
+ // Sentinel for no information.
+ return Code::NUMBER_OF_KINDS;
+}
+
+
+Code::Kind TypeFeedbackVector::GetKind(FeedbackVectorICSlot slot) const {
+ if (!FLAG_vector_ics) {
+ // We only have CALL_ICs
+ return Code::CALL_IC;
+ }
+
+ int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
+ int data = Smi::cast(get(index))->value();
+ VectorICKind b = VectorICComputer::decode(data, slot.ToInt());
+ return FromVectorICKind(b);
+}
+
+
+void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot, Code::Kind kind) {
+ if (!FLAG_vector_ics) {
+ // Nothing to do if we only have CALL_ICs
+ return;
+ }
+
+ VectorICKind b = FromCodeKind(kind);
+ int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
+ int data = Smi::cast(get(index))->value();
+ int new_data = VectorICComputer::encode(data, slot.ToInt(), b);
+ set(index, Smi::FromInt(new_data));
+}
+
+
+// static
Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
int slot_count,
int ic_slot_count) {
- int length = slot_count + ic_slot_count + kReservedIndexCount;
+ int index_count =
+ FLAG_vector_ics ? VectorICComputer::word_count(ic_slot_count) : 0;
+ int length = slot_count + ic_slot_count + index_count + kReservedIndexCount;
if (length == kReservedIndexCount) {
return Handle<TypeFeedbackVector>::cast(
isolate->factory()->empty_fixed_array());
@@ -24,17 +89,21 @@
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length, TENURED);
if (ic_slot_count > 0) {
array->set(kFirstICSlotIndex,
- Smi::FromInt(slot_count + kReservedIndexCount));
+ Smi::FromInt(slot_count + index_count + kReservedIndexCount));
} else {
array->set(kFirstICSlotIndex, Smi::FromInt(length));
}
array->set(kWithTypesIndex, Smi::FromInt(0));
array->set(kGenericCountIndex, Smi::FromInt(0));
+ // Fill the indexes with zeros.
+ for (int i = 0; i < index_count; i++) {
+ array->set(kReservedIndexCount + i, Smi::FromInt(0));
+ }
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
DCHECK_EQ(isolate->heap()->uninitialized_symbol(), *uninitialized_sentinel);
- for (int i = kReservedIndexCount; i < length; i++) {
+ for (int i = kReservedIndexCount + index_count; i < length; i++) {
array->set(i, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
}
return Handle<TypeFeedbackVector>::cast(array);
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
index ddfa8dd..61463b0 100644
--- a/src/type-feedback-vector.h
+++ b/src/type-feedback-vector.h
@@ -18,7 +18,9 @@
// 0: first_ic_slot_index (== length() if no ic slots are present)
// 1: ics_with_types
// 2: ics_with_generic_info
-// 3: first feedback slot
+// 3: type information for ic slots, if any
+// ...
+// N: first feedback slot (N >= 3)
// ...
// [<first_ic_slot_index>: feedback slot]
// ...to length() - 1
@@ -36,7 +38,7 @@
static const int kWithTypesIndex = 1;
static const int kGenericCountIndex = 2;
- int first_ic_slot_index() {
+ int first_ic_slot_index() const {
DCHECK(length() >= kReservedIndexCount);
return Smi::cast(get(kFirstICSlotIndex))->value();
}
@@ -66,53 +68,64 @@
}
}
- int Slots() {
+ inline int ic_metadata_length() const;
+
+ int Slots() const {
if (length() == 0) return 0;
- return Max(0, first_ic_slot_index() - kReservedIndexCount);
+ return Max(
+ 0, first_ic_slot_index() - ic_metadata_length() - kReservedIndexCount);
}
- int ICSlots() {
+ int ICSlots() const {
if (length() == 0) return 0;
return length() - first_ic_slot_index();
}
// Conversion from a slot or ic slot to an integer index to the underlying
// array.
- int GetIndex(FeedbackVectorSlot slot) {
- return kReservedIndexCount + slot.ToInt();
+ int GetIndex(FeedbackVectorSlot slot) const {
+ return kReservedIndexCount + ic_metadata_length() + slot.ToInt();
}
- int GetIndex(FeedbackVectorICSlot slot) {
+ int GetIndex(FeedbackVectorICSlot slot) const {
int first_ic_slot = first_ic_slot_index();
DCHECK(slot.ToInt() < ICSlots());
return first_ic_slot + slot.ToInt();
}
-
// Conversion from an integer index to either a slot or an ic slot. The caller
// should know what kind she expects.
- FeedbackVectorSlot ToSlot(int index) {
+ FeedbackVectorSlot ToSlot(int index) const {
DCHECK(index >= kReservedIndexCount && index < first_ic_slot_index());
- return FeedbackVectorSlot(index - kReservedIndexCount);
+ return FeedbackVectorSlot(index - ic_metadata_length() -
+ kReservedIndexCount);
}
- FeedbackVectorICSlot ToICSlot(int index) {
+ FeedbackVectorICSlot ToICSlot(int index) const {
DCHECK(index >= first_ic_slot_index() && index < length());
return FeedbackVectorICSlot(index - first_ic_slot_index());
}
- Object* Get(FeedbackVectorSlot slot) { return get(GetIndex(slot)); }
+ Object* Get(FeedbackVectorSlot slot) const { return get(GetIndex(slot)); }
void Set(FeedbackVectorSlot slot, Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
set(GetIndex(slot), value, mode);
}
- Object* Get(FeedbackVectorICSlot slot) { return get(GetIndex(slot)); }
+ Object* Get(FeedbackVectorICSlot slot) const { return get(GetIndex(slot)); }
void Set(FeedbackVectorICSlot slot, Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
set(GetIndex(slot), value, mode);
}
+ // IC slots need metadata to recognize the type of IC. Set a Kind for every
+ // slot. If GetKind() returns Code::NUMBER_OF_KINDS, then there is
+ // no kind associated with this slot. This may happen in the current design
+ // if a decision is made at compile time not to emit an IC that was planned
+ // for at parse time. This can be eliminated if we encode kind at parse
+ // time.
+ Code::Kind GetKind(FeedbackVectorICSlot slot) const;
+ void SetKind(FeedbackVectorICSlot slot, Code::Kind kind);
static Handle<TypeFeedbackVector> Allocate(Isolate* isolate, int slot_count,
int ic_slot_count);
@@ -145,6 +158,19 @@
static inline Object* RawUninitializedSentinel(Heap* heap);
private:
+ enum VectorICKind {
+ KindUnused = 0x0,
+ KindCallIC = 0x1,
+ KindLoadIC = 0x2,
+ KindKeyedLoadIC = 0x3
+ };
+
+ static const int kVectorICKindBits = 2;
+ static VectorICKind FromCodeKind(Code::Kind kind);
+ static Code::Kind FromVectorICKind(VectorICKind kind);
+ typedef BitSetComputer<VectorICKind, kVectorICKindBits, kSmiValueSize,
+ uint32_t> VectorICComputer;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
};
}
diff --git a/src/utils.h b/src/utils.h
index e2e10fd..dcefa44 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -239,6 +239,46 @@
// ----------------------------------------------------------------------------
+// BitSetComputer is a helper template for encoding and decoding information for
+// a variable number of items in an array.
+//
+// To encode boolean data in a smi array you would use:
+// typedef BitSetComputer<bool, 1, kSmiValueSize, uint32_t> BoolComputer;
+//
+template <class T, int kBitsPerItem, int kBitsPerWord, class U>
+class BitSetComputer {
+ public:
+ static const int kItemsPerWord = kBitsPerWord / kBitsPerItem;
+ static const int kMask = (1 << kBitsPerItem) - 1;
+
+ // The number of array elements required to embed T information for each item.
+ static int word_count(int items) {
+ if (items == 0) return 0;
+ return (items - 1) / kItemsPerWord + 1;
+ }
+
+ // The array index to look at for item.
+ static int index(int base_index, int item) {
+ return base_index + item / kItemsPerWord;
+ }
+
+ // Extract T data for a given item from data.
+ static T decode(U data, int item) {
+ return static_cast<T>((data >> shift(item)) & kMask);
+ }
+
+ // Return the encoding for a store of value for item in previous.
+ static U encode(U previous, int item, T value) {
+ int shift_value = shift(item);
+ int set_bits = (static_cast<int>(value) << shift_value);
+ return (previous & ~(kMask << shift_value)) | set_bits;
+ }
+
+ static int shift(int item) { return (item % kItemsPerWord) * kBitsPerItem; }
+};
+
+
+// ----------------------------------------------------------------------------
// Hash function.
static const uint32_t kZeroHashSeed = 0;
diff --git a/src/version.cc b/src/version.cc
index bb2734c..0e20eb2 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 30
-#define BUILD_NUMBER 19
+#define BUILD_NUMBER 20
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/test/cctest/compiler/codegen-tester.h b/test/cctest/compiler/codegen-tester.h
index 6aa5bae..6af5c78 100644
--- a/test/cctest/compiler/codegen-tester.h
+++ b/test/cctest/compiler/codegen-tester.h
@@ -66,7 +66,7 @@
CallDescriptor* call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
CompilationInfo info(graph->zone()->isolate(), graph->zone());
- Linkage linkage(&info, call_descriptor);
+ Linkage linkage(graph->zone(), call_descriptor);
Pipeline pipeline(&info);
code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph, schedule);
}
diff --git a/test/cctest/compiler/function-tester.h b/test/cctest/compiler/function-tester.h
index 34c663f..eb6bd49 100644
--- a/test/cctest/compiler/function-tester.h
+++ b/test/cctest/compiler/function-tester.h
@@ -221,7 +221,7 @@
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
Pipeline pipeline(&info);
- Linkage linkage(&info);
+ Linkage linkage(info.zone(), &info);
Handle<Code> code = pipeline.GenerateCodeForMachineGraph(&linkage, graph);
CHECK(!code.is_null());
function->ReplaceCode(*code);
diff --git a/test/cctest/compiler/graph-builder-tester.cc b/test/cctest/compiler/graph-builder-tester.cc
index bfa8226..9c4379c 100644
--- a/test/cctest/compiler/graph-builder-tester.cc
+++ b/test/cctest/compiler/graph-builder-tester.cc
@@ -36,7 +36,7 @@
if (code_.is_null()) {
Zone* zone = graph_->zone();
CompilationInfo info(zone->isolate(), zone);
- Linkage linkage(&info,
+ Linkage linkage(zone,
Linkage::GetSimplifiedCDescriptor(zone, machine_sig_));
Pipeline pipeline(&info);
code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph_);
diff --git a/test/cctest/compiler/graph-builder-tester.h b/test/cctest/compiler/graph-builder-tester.h
index df79250..1bc5be7 100644
--- a/test/cctest/compiler/graph-builder-tester.h
+++ b/test/cctest/compiler/graph-builder-tester.h
@@ -27,8 +27,8 @@
protected:
virtual Node* MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs) FINAL {
- return graph()->NewNode(op, value_input_count, value_inputs);
+ Node** value_inputs, bool incomplete) FINAL {
+ return graph()->NewNode(op, value_input_count, value_inputs, incomplete);
}
};
diff --git a/test/cctest/compiler/simplified-graph-builder.cc b/test/cctest/compiler/simplified-graph-builder.cc
index c44d5ed..03b4b22 100644
--- a/test/cctest/compiler/simplified-graph-builder.cc
+++ b/test/cctest/compiler/simplified-graph-builder.cc
@@ -45,7 +45,7 @@
Node* SimplifiedGraphBuilder::MakeNode(const Operator* op,
int value_input_count,
- Node** value_inputs) {
+ Node** value_inputs, bool incomplete) {
DCHECK(op->InputCount() == value_input_count);
DCHECK(!OperatorProperties::HasContextInput(op));
@@ -58,7 +58,7 @@
Node* result = NULL;
if (!has_control && !has_effect) {
- result = graph()->NewNode(op, value_input_count, value_inputs);
+ result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
int input_count_with_deps = value_input_count;
if (has_control) ++input_count_with_deps;
@@ -72,7 +72,7 @@
if (has_control) {
*current_input++ = graph()->start();
}
- result = graph()->NewNode(op, input_count_with_deps, buffer);
+ result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
if (has_effect) {
effect_ = result;
}
diff --git a/test/cctest/compiler/simplified-graph-builder.h b/test/cctest/compiler/simplified-graph-builder.h
index 82c568b..ad062e6 100644
--- a/test/cctest/compiler/simplified-graph-builder.h
+++ b/test/cctest/compiler/simplified-graph-builder.h
@@ -139,7 +139,7 @@
protected:
virtual Node* MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs) FINAL;
+ Node** value_inputs, bool incomplete) FINAL;
private:
Node* effect_;
diff --git a/test/cctest/compiler/test-changes-lowering.cc b/test/cctest/compiler/test-changes-lowering.cc
index 9402e36..a015c50 100644
--- a/test/cctest/compiler/test-changes-lowering.cc
+++ b/test/cctest/compiler/test-changes-lowering.cc
@@ -125,7 +125,7 @@
void LowerChange(Node* change) {
// Run the graph reducer with changes lowering on a single node.
CompilationInfo info(this->isolate(), this->zone());
- Linkage linkage(&info);
+ Linkage linkage(this->zone(), &info);
ChangeLowering lowering(&jsgraph, &linkage);
GraphReducer reducer(this->graph());
reducer.AddReducer(&lowering);
diff --git a/test/cctest/compiler/test-codegen-deopt.cc b/test/cctest/compiler/test-codegen-deopt.cc
index 0feeda1..799fa94 100644
--- a/test/cctest/compiler/test-codegen-deopt.cc
+++ b/test/cctest/compiler/test-codegen-deopt.cc
@@ -66,7 +66,7 @@
}
// Initialize the codegen and generate code.
- Linkage* linkage = new (scope_->main_zone()) Linkage(&info);
+ Linkage* linkage = new (scope_->main_zone()) Linkage(info.zone(), &info);
code = new v8::internal::compiler::InstructionSequence(scope_->main_zone(),
graph, schedule);
SourcePositionTable source_positions(graph);
@@ -88,7 +88,7 @@
<< *code;
}
- compiler::CodeGenerator generator(&frame, linkage, code);
+ compiler::CodeGenerator generator(&frame, linkage, code, &info);
result_code = generator.GenerateCode();
#ifdef OBJECT_PRINT
diff --git a/test/cctest/compiler/test-control-reducer.cc b/test/cctest/compiler/test-control-reducer.cc
index 0e8dc17..dc80cc3 100644
--- a/test/cctest/compiler/test-control-reducer.cc
+++ b/test/cctest/compiler/test-control-reducer.cc
@@ -5,40 +5,50 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
+#include "src/base/bits.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-reducer.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
-class CTrimTester : HandleAndZoneScope {
- public:
- CTrimTester()
- : isolate(main_isolate()),
- common(main_zone()),
- graph(main_zone()),
- jsgraph(&graph, &common, NULL, NULL),
- start(graph.NewNode(common.Start(1))),
- p0(graph.NewNode(common.Parameter(0), start)),
- one(jsgraph.OneConstant()),
- half(jsgraph.Constant(0.5)) {
- graph.SetEnd(start);
- graph.SetStart(start);
- }
+static const size_t kNumLeafs = 4;
- Isolate* isolate;
- CommonOperatorBuilder common;
- Graph graph;
- JSGraph jsgraph;
- Node* start;
- Node* p0;
- Node* one;
- Node* half;
+// TODO(titzer): convert this whole file into unit tests.
- void Trim() { ControlReducer::TrimGraph(main_zone(), &jsgraph); }
-};
+static int CheckInputs(Node* node, Node* i0 = NULL, Node* i1 = NULL,
+ Node* i2 = NULL) {
+ int count = 3;
+ if (i2 == NULL) count = 2;
+ if (i1 == NULL) count = 1;
+ if (i0 == NULL) count = 0;
+ CHECK_EQ(count, node->InputCount());
+ if (i0 != NULL) CHECK_EQ(i0, node->InputAt(0));
+ if (i1 != NULL) CHECK_EQ(i1, node->InputAt(1));
+ if (i2 != NULL) CHECK_EQ(i2, node->InputAt(2));
+ return count;
+}
+
+
+static int CheckMerge(Node* node, Node* i0 = NULL, Node* i1 = NULL,
+ Node* i2 = NULL) {
+ CHECK_EQ(IrOpcode::kMerge, node->opcode());
+ int count = CheckInputs(node, i0, i1, i2);
+ CHECK_EQ(count, OperatorProperties::GetControlInputCount(node->op()));
+ return count;
+}
+
+
+static int CheckLoop(Node* node, Node* i0 = NULL, Node* i1 = NULL,
+ Node* i2 = NULL) {
+ CHECK_EQ(IrOpcode::kLoop, node->opcode());
+ int count = CheckInputs(node, i0, i1, i2);
+ CHECK_EQ(count, OperatorProperties::GetControlInputCount(node->op()));
+ return count;
+}
bool IsUsedBy(Node* a, Node* b) {
@@ -49,18 +59,153 @@
}
+// A helper for all tests dealing with ControlReducerTester.
+class ControlReducerTester : HandleAndZoneScope {
+ public:
+ ControlReducerTester()
+ : isolate(main_isolate()),
+ common(main_zone()),
+ graph(main_zone()),
+ jsgraph(&graph, &common, NULL, NULL),
+ start(graph.NewNode(common.Start(1))),
+ end(graph.NewNode(common.End(), start)),
+ p0(graph.NewNode(common.Parameter(0), start)),
+ zero(jsgraph.Int32Constant(0)),
+ one(jsgraph.OneConstant()),
+ half(jsgraph.Constant(0.5)),
+ self(graph.NewNode(common.Int32Constant(0xaabbccdd))),
+ dead(graph.NewNode(common.Dead())) {
+ graph.SetEnd(end);
+ graph.SetStart(start);
+ leaf[0] = zero;
+ leaf[1] = one;
+ leaf[2] = half;
+ leaf[3] = p0;
+ }
+
+ Isolate* isolate;
+ CommonOperatorBuilder common;
+ Graph graph;
+ JSGraph jsgraph;
+ Node* start;
+ Node* end;
+ Node* p0;
+ Node* zero;
+ Node* one;
+ Node* half;
+ Node* self;
+ Node* dead;
+ Node* leaf[kNumLeafs];
+
+ Node* Phi(Node* a) {
+ return SetSelfReferences(graph.NewNode(op(1, false), a, start));
+ }
+
+ Node* Phi(Node* a, Node* b) {
+ return SetSelfReferences(graph.NewNode(op(2, false), a, b, start));
+ }
+
+ Node* Phi(Node* a, Node* b, Node* c) {
+ return SetSelfReferences(graph.NewNode(op(3, false), a, b, c, start));
+ }
+
+ Node* Phi(Node* a, Node* b, Node* c, Node* d) {
+ return SetSelfReferences(graph.NewNode(op(4, false), a, b, c, d, start));
+ }
+
+ Node* EffectPhi(Node* a) {
+ return SetSelfReferences(graph.NewNode(op(1, true), a, start));
+ }
+
+ Node* EffectPhi(Node* a, Node* b) {
+ return SetSelfReferences(graph.NewNode(op(2, true), a, b, start));
+ }
+
+ Node* EffectPhi(Node* a, Node* b, Node* c) {
+ return SetSelfReferences(graph.NewNode(op(3, true), a, b, c, start));
+ }
+
+ Node* EffectPhi(Node* a, Node* b, Node* c, Node* d) {
+ return SetSelfReferences(graph.NewNode(op(4, true), a, b, c, d, start));
+ }
+
+ Node* SetSelfReferences(Node* node) {
+ Node::Inputs inputs = node->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter) {
+ Node* input = *iter;
+ if (input == self) node->ReplaceInput(iter.index(), node);
+ }
+ return node;
+ }
+
+ const Operator* op(int count, bool effect) {
+ return effect ? common.EffectPhi(count) : common.Phi(kMachAnyTagged, count);
+ }
+
+ void Trim() { ControlReducer::TrimGraph(main_zone(), &jsgraph); }
+
+ void ReduceGraph() {
+ ControlReducer::ReduceGraph(main_zone(), &jsgraph, &common);
+ }
+
+ // Checks one-step reduction of a phi.
+ void ReducePhi(Node* expect, Node* phi) {
+ Node* result = ControlReducer::ReducePhiForTesting(&jsgraph, &common, phi);
+ CHECK_EQ(expect, result);
+ ReducePhiIterative(expect, phi); // iterative should give the same result.
+ }
+
+ void ReducePhiIterative(Node* expect, Node* phi) {
+ p0->ReplaceInput(0, start); // hack: parameters may be trimmed.
+ Node* ret = graph.NewNode(common.Return(), phi, start, start);
+ Node* end = graph.NewNode(common.End(), ret);
+ graph.SetEnd(end);
+ ControlReducer::ReduceGraph(main_zone(), &jsgraph, &common);
+ CheckInputs(end, ret);
+ CheckInputs(ret, expect, start, start);
+ }
+
+ void ReduceMerge(Node* expect, Node* merge) {
+ Node* result =
+ ControlReducer::ReduceMergeForTesting(&jsgraph, &common, merge);
+ CHECK_EQ(expect, result);
+ }
+
+ void ReduceMergeIterative(Node* expect, Node* merge) {
+ p0->ReplaceInput(0, start); // hack: parameters may be trimmed.
+ Node* end = graph.NewNode(common.End(), merge);
+ graph.SetEnd(end);
+ ReduceGraph();
+ CheckInputs(end, expect);
+ }
+
+ void ReduceBranch(Node* expect, Node* branch) {
+ Node* result =
+ ControlReducer::ReduceBranchForTesting(&jsgraph, &common, branch);
+ CHECK_EQ(expect, result);
+ }
+
+ Node* Return(Node* val, Node* effect, Node* control) {
+ Node* ret = graph.NewNode(common.Return(), val, effect, control);
+ end->ReplaceInput(0, ret);
+ return ret;
+ }
+};
+
+
TEST(Trim1_live) {
- CTrimTester T;
+ ControlReducerTester T;
CHECK(IsUsedBy(T.start, T.p0));
T.graph.SetEnd(T.p0);
T.Trim();
CHECK(IsUsedBy(T.start, T.p0));
- CHECK_EQ(T.start, T.p0->InputAt(0));
+ CheckInputs(T.p0, T.start);
}
TEST(Trim1_dead) {
- CTrimTester T;
+ ControlReducerTester T;
CHECK(IsUsedBy(T.start, T.p0));
T.Trim();
CHECK(!IsUsedBy(T.start, T.p0));
@@ -69,7 +214,7 @@
TEST(Trim2_live) {
- CTrimTester T;
+ ControlReducerTester T;
Node* phi =
T.graph.NewNode(T.common.Phi(kMachAnyTagged, 2), T.one, T.half, T.start);
CHECK(IsUsedBy(T.one, phi));
@@ -80,14 +225,12 @@
CHECK(IsUsedBy(T.one, phi));
CHECK(IsUsedBy(T.half, phi));
CHECK(IsUsedBy(T.start, phi));
- CHECK_EQ(T.one, phi->InputAt(0));
- CHECK_EQ(T.half, phi->InputAt(1));
- CHECK_EQ(T.start, phi->InputAt(2));
+ CheckInputs(phi, T.one, T.half, T.start);
}
TEST(Trim2_dead) {
- CTrimTester T;
+ ControlReducerTester T;
Node* phi =
T.graph.NewNode(T.common.Phi(kMachAnyTagged, 2), T.one, T.half, T.start);
CHECK(IsUsedBy(T.one, phi));
@@ -104,7 +247,7 @@
TEST(Trim_chain1) {
- CTrimTester T;
+ ControlReducerTester T;
const int kDepth = 15;
Node* live[kDepth];
Node* dead[kDepth];
@@ -126,7 +269,7 @@
TEST(Trim_chain2) {
- CTrimTester T;
+ ControlReducerTester T;
const int kDepth = 15;
Node* live[kDepth];
Node* dead[kDepth];
@@ -149,7 +292,7 @@
TEST(Trim_cycle1) {
- CTrimTester T;
+ ControlReducerTester T;
Node* loop = T.graph.NewNode(T.common.Loop(1), T.start, T.start);
loop->ReplaceInput(1, loop);
Node* end = T.graph.NewNode(T.common.End(), loop);
@@ -165,14 +308,13 @@
CHECK(IsUsedBy(T.start, loop));
CHECK(IsUsedBy(loop, end));
CHECK(IsUsedBy(loop, loop));
- CHECK_EQ(T.start, loop->InputAt(0));
- CHECK_EQ(loop, loop->InputAt(1));
- CHECK_EQ(loop, end->InputAt(0));
+ CheckInputs(loop, T.start, loop);
+ CheckInputs(end, loop);
}
TEST(Trim_cycle2) {
- CTrimTester T;
+ ControlReducerTester T;
Node* loop = T.graph.NewNode(T.common.Loop(2), T.start, T.start);
loop->ReplaceInput(1, loop);
Node* end = T.graph.NewNode(T.common.End(), loop);
@@ -193,9 +335,8 @@
CHECK(IsUsedBy(T.start, loop));
CHECK(IsUsedBy(loop, end));
CHECK(IsUsedBy(loop, loop));
- CHECK_EQ(T.start, loop->InputAt(0));
- CHECK_EQ(loop, loop->InputAt(1));
- CHECK_EQ(loop, end->InputAt(0));
+ CheckInputs(loop, T.start, loop);
+ CheckInputs(end, loop);
// phi should have been trimmed away.
CHECK(!IsUsedBy(loop, phi));
@@ -207,7 +348,7 @@
}
-void CheckTrimConstant(CTrimTester* T, Node* k) {
+void CheckTrimConstant(ControlReducerTester* T, Node* k) {
Node* phi = T->graph.NewNode(T->common.Phi(kMachInt32, 1), k, T->start);
CHECK(IsUsedBy(k, phi));
T->Trim();
@@ -218,7 +359,7 @@
TEST(Trim_constants) {
- CTrimTester T;
+ ControlReducerTester T;
int32_t int32_constants[] = {
0, -1, -2, 2, 2, 3, 3, 4, 4, 5, 5, 4, 5, 6, 6, 7, 8, 7, 8, 9,
0, -11, -12, 12, 12, 13, 13, 14, 14, 15, 15, 14, 15, 6, 6, 7, 8, 7, 8, 9};
@@ -240,3 +381,1300 @@
CheckTrimConstant(&T, other_constants[i]);
}
}
+
+
+TEST(CReducePhi1) {
+ ControlReducerTester R;
+
+ R.ReducePhi(R.leaf[0], R.Phi(R.leaf[0]));
+ R.ReducePhi(R.leaf[1], R.Phi(R.leaf[1]));
+ R.ReducePhi(R.leaf[2], R.Phi(R.leaf[2]));
+ R.ReducePhi(R.leaf[3], R.Phi(R.leaf[3]));
+}
+
+
+TEST(CReducePhi1_dead) {
+ ControlReducerTester R;
+
+ R.ReducePhi(R.leaf[0], R.Phi(R.leaf[0], R.dead));
+ R.ReducePhi(R.leaf[1], R.Phi(R.leaf[1], R.dead));
+ R.ReducePhi(R.leaf[2], R.Phi(R.leaf[2], R.dead));
+ R.ReducePhi(R.leaf[3], R.Phi(R.leaf[3], R.dead));
+
+ R.ReducePhi(R.leaf[0], R.Phi(R.dead, R.leaf[0]));
+ R.ReducePhi(R.leaf[1], R.Phi(R.dead, R.leaf[1]));
+ R.ReducePhi(R.leaf[2], R.Phi(R.dead, R.leaf[2]));
+ R.ReducePhi(R.leaf[3], R.Phi(R.dead, R.leaf[3]));
+}
+
+
+TEST(CReducePhi1_dead2) {
+ ControlReducerTester R;
+
+ R.ReducePhi(R.leaf[0], R.Phi(R.leaf[0], R.dead, R.dead));
+ R.ReducePhi(R.leaf[0], R.Phi(R.dead, R.leaf[0], R.dead));
+ R.ReducePhi(R.leaf[0], R.Phi(R.dead, R.dead, R.leaf[0]));
+}
+
+
+TEST(CReducePhi2a) {
+ ControlReducerTester R;
+
+ for (size_t i = 0; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i];
+ R.ReducePhi(a, R.Phi(a, a));
+ }
+}
+
+
+TEST(CReducePhi2b) {
+ ControlReducerTester R;
+
+ for (size_t i = 0; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i];
+ R.ReducePhi(a, R.Phi(R.self, a));
+ R.ReducePhi(a, R.Phi(a, R.self));
+ }
+}
+
+
+TEST(CReducePhi2c) {
+ ControlReducerTester R;
+
+ for (size_t i = 1; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i], *b = R.leaf[0];
+ Node* phi1 = R.Phi(b, a);
+ R.ReducePhi(phi1, phi1);
+
+ Node* phi2 = R.Phi(a, b);
+ R.ReducePhi(phi2, phi2);
+ }
+}
+
+
+TEST(CReducePhi2_dead) {
+ ControlReducerTester R;
+
+ for (size_t i = 0; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i];
+ R.ReducePhi(a, R.Phi(a, a, R.dead));
+ R.ReducePhi(a, R.Phi(a, R.dead, a));
+ R.ReducePhi(a, R.Phi(R.dead, a, a));
+ }
+
+ for (size_t i = 0; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i];
+ R.ReducePhi(a, R.Phi(R.self, a));
+ R.ReducePhi(a, R.Phi(a, R.self));
+ R.ReducePhi(a, R.Phi(R.self, a, R.dead));
+ R.ReducePhi(a, R.Phi(a, R.self, R.dead));
+ }
+
+ for (size_t i = 1; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i], *b = R.leaf[0];
+ Node* phi1 = R.Phi(b, a, R.dead);
+ R.ReducePhi(phi1, phi1);
+
+ Node* phi2 = R.Phi(a, b, R.dead);
+ R.ReducePhi(phi2, phi2);
+ }
+}
+
+
+TEST(CReducePhi3) {
+ ControlReducerTester R;
+
+ for (size_t i = 0; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i];
+ R.ReducePhi(a, R.Phi(a, a, a));
+ }
+
+ for (size_t i = 0; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i];
+ R.ReducePhi(a, R.Phi(R.self, a, a));
+ R.ReducePhi(a, R.Phi(a, R.self, a));
+ R.ReducePhi(a, R.Phi(a, a, R.self));
+ }
+
+ for (size_t i = 1; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i], *b = R.leaf[0];
+ Node* phi1 = R.Phi(b, a, a);
+ R.ReducePhi(phi1, phi1);
+
+ Node* phi2 = R.Phi(a, b, a);
+ R.ReducePhi(phi2, phi2);
+
+ Node* phi3 = R.Phi(a, a, b);
+ R.ReducePhi(phi3, phi3);
+ }
+}
+
+
+TEST(CReducePhi4) {
+ ControlReducerTester R;
+
+ for (size_t i = 0; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i];
+ R.ReducePhi(a, R.Phi(a, a, a, a));
+ }
+
+ for (size_t i = 0; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i];
+ R.ReducePhi(a, R.Phi(R.self, a, a, a));
+ R.ReducePhi(a, R.Phi(a, R.self, a, a));
+ R.ReducePhi(a, R.Phi(a, a, R.self, a));
+ R.ReducePhi(a, R.Phi(a, a, a, R.self));
+
+ R.ReducePhi(a, R.Phi(R.self, R.self, a, a));
+ R.ReducePhi(a, R.Phi(a, R.self, R.self, a));
+ R.ReducePhi(a, R.Phi(a, a, R.self, R.self));
+ R.ReducePhi(a, R.Phi(R.self, a, a, R.self));
+ }
+
+ for (size_t i = 1; i < kNumLeafs; i++) {
+ Node* a = R.leaf[i], *b = R.leaf[0];
+ Node* phi1 = R.Phi(b, a, a, a);
+ R.ReducePhi(phi1, phi1);
+
+ Node* phi2 = R.Phi(a, b, a, a);
+ R.ReducePhi(phi2, phi2);
+
+ Node* phi3 = R.Phi(a, a, b, a);
+ R.ReducePhi(phi3, phi3);
+
+ Node* phi4 = R.Phi(a, a, a, b);
+ R.ReducePhi(phi4, phi4);
+ }
+}
+
+
+TEST(CReducePhi_iterative1) {
+ ControlReducerTester R;
+
+ R.ReducePhiIterative(R.leaf[0], R.Phi(R.leaf[0], R.Phi(R.leaf[0])));
+ R.ReducePhiIterative(R.leaf[0], R.Phi(R.Phi(R.leaf[0]), R.leaf[0]));
+}
+
+
+TEST(CReducePhi_iterative2) {
+ ControlReducerTester R;
+
+ R.ReducePhiIterative(R.leaf[0], R.Phi(R.Phi(R.leaf[0]), R.Phi(R.leaf[0])));
+}
+
+
+TEST(CReducePhi_iterative3) {
+ ControlReducerTester R;
+
+ R.ReducePhiIterative(R.leaf[0],
+ R.Phi(R.leaf[0], R.Phi(R.leaf[0], R.leaf[0])));
+ R.ReducePhiIterative(R.leaf[0],
+ R.Phi(R.Phi(R.leaf[0], R.leaf[0]), R.leaf[0]));
+}
+
+
+TEST(CReducePhi_iterative4) {
+ ControlReducerTester R;
+
+ R.ReducePhiIterative(R.leaf[0], R.Phi(R.Phi(R.leaf[0], R.leaf[0]),
+ R.Phi(R.leaf[0], R.leaf[0])));
+
+ Node* p1 = R.Phi(R.leaf[0], R.leaf[0]);
+ R.ReducePhiIterative(R.leaf[0], R.Phi(p1, p1));
+
+ Node* p2 = R.Phi(R.leaf[0], R.leaf[0], R.leaf[0]);
+ R.ReducePhiIterative(R.leaf[0], R.Phi(p2, p2, p2));
+
+ Node* p3 = R.Phi(R.leaf[0], R.leaf[0], R.leaf[0]);
+ R.ReducePhiIterative(R.leaf[0], R.Phi(p3, p3, R.leaf[0]));
+}
+
+
+TEST(CReducePhi_iterative_self1) {
+ ControlReducerTester R;
+
+ R.ReducePhiIterative(R.leaf[0], R.Phi(R.leaf[0], R.Phi(R.leaf[0], R.self)));
+ R.ReducePhiIterative(R.leaf[0], R.Phi(R.Phi(R.leaf[0], R.self), R.leaf[0]));
+}
+
+
+TEST(CReducePhi_iterative_self2) {
+ ControlReducerTester R;
+
+ R.ReducePhiIterative(
+ R.leaf[0], R.Phi(R.Phi(R.leaf[0], R.self), R.Phi(R.leaf[0], R.self)));
+ R.ReducePhiIterative(
+ R.leaf[0], R.Phi(R.Phi(R.self, R.leaf[0]), R.Phi(R.self, R.leaf[0])));
+
+ Node* p1 = R.Phi(R.leaf[0], R.self);
+ R.ReducePhiIterative(R.leaf[0], R.Phi(p1, p1));
+
+ Node* p2 = R.Phi(R.self, R.leaf[0]);
+ R.ReducePhiIterative(R.leaf[0], R.Phi(p2, p2));
+}
+
+
+TEST(EReducePhi1) {
+ ControlReducerTester R;
+
+ R.ReducePhi(R.leaf[0], R.EffectPhi(R.leaf[0]));
+ R.ReducePhi(R.leaf[1], R.EffectPhi(R.leaf[1]));
+ R.ReducePhi(R.leaf[2], R.EffectPhi(R.leaf[2]));
+ R.ReducePhi(R.leaf[3], R.EffectPhi(R.leaf[3]));
+}
+
+
+TEST(EReducePhi1_dead) {
+ ControlReducerTester R;
+
+ R.ReducePhi(R.leaf[0], R.EffectPhi(R.leaf[0], R.dead));
+ R.ReducePhi(R.leaf[1], R.EffectPhi(R.leaf[1], R.dead));
+ R.ReducePhi(R.leaf[2], R.EffectPhi(R.leaf[2], R.dead));
+ R.ReducePhi(R.leaf[3], R.EffectPhi(R.leaf[3], R.dead));
+
+ R.ReducePhi(R.leaf[0], R.EffectPhi(R.dead, R.leaf[0]));
+ R.ReducePhi(R.leaf[1], R.EffectPhi(R.dead, R.leaf[1]));
+ R.ReducePhi(R.leaf[2], R.EffectPhi(R.dead, R.leaf[2]));
+ R.ReducePhi(R.leaf[3], R.EffectPhi(R.dead, R.leaf[3]));
+}
+
+
+TEST(EReducePhi1_dead2) {
+ ControlReducerTester R;
+
+ R.ReducePhi(R.leaf[0], R.EffectPhi(R.leaf[0], R.dead, R.dead));
+ R.ReducePhi(R.leaf[0], R.EffectPhi(R.dead, R.leaf[0], R.dead));
+ R.ReducePhi(R.leaf[0], R.EffectPhi(R.dead, R.dead, R.leaf[0]));
+}
+
+
+TEST(CMergeReduce_simple1) {
+ ControlReducerTester R;
+
+ Node* merge = R.graph.NewNode(R.common.Merge(1), R.start);
+ R.ReduceMerge(R.start, merge);
+}
+
+
+TEST(CMergeReduce_simple2) {
+ ControlReducerTester R;
+
+ Node* merge1 = R.graph.NewNode(R.common.Merge(1), R.start);
+ Node* merge2 = R.graph.NewNode(R.common.Merge(1), merge1);
+ R.ReduceMerge(merge1, merge2);
+ R.ReduceMergeIterative(R.start, merge2);
+}
+
+
+TEST(CMergeReduce_none1) {
+ ControlReducerTester R;
+
+ Node* merge = R.graph.NewNode(R.common.Merge(2), R.start, R.start);
+ R.ReduceMerge(merge, merge);
+}
+
+
+TEST(CMergeReduce_none2) {
+ ControlReducerTester R;
+
+ Node* t = R.graph.NewNode(R.common.IfTrue(), R.start);
+ Node* f = R.graph.NewNode(R.common.IfFalse(), R.start);
+ Node* merge = R.graph.NewNode(R.common.Merge(2), t, f);
+ R.ReduceMerge(merge, merge);
+}
+
+
+TEST(CMergeReduce_self3) {
+ ControlReducerTester R;
+
+ Node* merge =
+ R.SetSelfReferences(R.graph.NewNode(R.common.Merge(2), R.start, R.self));
+ R.ReduceMerge(merge, merge);
+}
+
+
+TEST(CMergeReduce_dead1) {
+ ControlReducerTester R;
+
+ Node* merge = R.graph.NewNode(R.common.Merge(2), R.start, R.dead);
+ R.ReduceMerge(R.start, merge);
+}
+
+
+TEST(CMergeReduce_dead2) {
+ ControlReducerTester R;
+
+ Node* merge1 = R.graph.NewNode(R.common.Merge(1), R.start);
+ Node* merge2 = R.graph.NewNode(R.common.Merge(2), merge1, R.dead);
+ R.ReduceMerge(merge1, merge2);
+ R.ReduceMergeIterative(R.start, merge2);
+}
+
+
+TEST(CMergeReduce_dead_rm1a) {
+ ControlReducerTester R;
+
+ for (int i = 0; i < 3; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(3), R.start, R.start, R.start);
+ merge->ReplaceInput(i, R.dead);
+ R.ReduceMerge(merge, merge);
+ CheckMerge(merge, R.start, R.start);
+ }
+}
+
+
+TEST(CMergeReduce_dead_rm1b) {
+ ControlReducerTester R;
+
+ Node* t = R.graph.NewNode(R.common.IfTrue(), R.start);
+ Node* f = R.graph.NewNode(R.common.IfFalse(), R.start);
+ for (int i = 0; i < 2; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(3), R.dead, R.dead, R.dead);
+ for (int j = i + 1; j < 3; j++) {
+ merge->ReplaceInput(i, t);
+ merge->ReplaceInput(j, f);
+ R.ReduceMerge(merge, merge);
+ CheckMerge(merge, t, f);
+ }
+ }
+}
+
+
+TEST(CMergeReduce_dead_rm2) {
+ ControlReducerTester R;
+
+ for (int i = 0; i < 3; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(3), R.dead, R.dead, R.dead);
+ merge->ReplaceInput(i, R.start);
+ R.ReduceMerge(R.start, merge);
+ }
+}
+
+
+TEST(CLoopReduce_dead_rm1) {
+ ControlReducerTester R;
+
+ for (int i = 0; i < 3; i++) {
+ Node* loop = R.graph.NewNode(R.common.Loop(3), R.dead, R.start, R.start);
+ R.ReduceMerge(loop, loop);
+ CheckLoop(loop, R.start, R.start);
+ }
+}
+
+
+TEST(CMergeReduce_edit_phi1) {
+ ControlReducerTester R;
+
+ for (int i = 0; i < 3; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(3), R.start, R.start, R.start);
+ merge->ReplaceInput(i, R.dead);
+ Node* phi = R.graph.NewNode(R.common.Phi(kMachAnyTagged, 3), R.leaf[0],
+ R.leaf[1], R.leaf[2], merge);
+ R.ReduceMerge(merge, merge);
+ CHECK_EQ(IrOpcode::kPhi, phi->opcode());
+ CHECK_EQ(2, phi->op()->InputCount());
+ CHECK_EQ(3, phi->InputCount());
+ CHECK_EQ(R.leaf[i < 1 ? 1 : 0], phi->InputAt(0));
+ CHECK_EQ(R.leaf[i < 2 ? 2 : 1], phi->InputAt(1));
+ CHECK_EQ(merge, phi->InputAt(2));
+ }
+}
+
+
+TEST(CMergeReduce_edit_effect_phi1) {
+ ControlReducerTester R;
+
+ for (int i = 0; i < 3; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(3), R.start, R.start, R.start);
+ merge->ReplaceInput(i, R.dead);
+ Node* phi = R.graph.NewNode(R.common.EffectPhi(3), R.leaf[0], R.leaf[1],
+ R.leaf[2], merge);
+ R.ReduceMerge(merge, merge);
+ CHECK_EQ(IrOpcode::kEffectPhi, phi->opcode());
+ CHECK_EQ(0, phi->op()->InputCount());
+ CHECK_EQ(2, OperatorProperties::GetEffectInputCount(phi->op()));
+ CHECK_EQ(3, phi->InputCount());
+ CHECK_EQ(R.leaf[i < 1 ? 1 : 0], phi->InputAt(0));
+ CHECK_EQ(R.leaf[i < 2 ? 2 : 1], phi->InputAt(1));
+ CHECK_EQ(merge, phi->InputAt(2));
+ }
+}
+
+
+static const int kSelectorSize = 4;
+
+// Helper to select K of N nodes according to a mask, useful for the test below.
+struct Selector {
+ int mask;
+ int count;
+ explicit Selector(int m) {
+ mask = m;
+ count = v8::base::bits::CountPopulation32(m);
+ }
+ bool is_selected(int i) { return (mask & (1 << i)) != 0; }
+ void CheckNode(Node* node, IrOpcode::Value opcode, Node** inputs,
+ Node* control) {
+ CHECK_EQ(opcode, node->opcode());
+ CHECK_EQ(count + (control != NULL ? 1 : 0), node->InputCount());
+ int index = 0;
+ for (int i = 0; i < kSelectorSize; i++) {
+ if (mask & (1 << i)) {
+ CHECK_EQ(inputs[i], node->InputAt(index++));
+ }
+ }
+ CHECK_EQ(count, index);
+ if (control != NULL) CHECK_EQ(control, node->InputAt(index++));
+ }
+ int single_index() {
+ CHECK_EQ(1, count);
+ return WhichPowerOf2(mask);
+ }
+};
+
+
+TEST(CMergeReduce_exhaustive_4) {
+ ControlReducerTester R;
+ Node* controls[] = {
+ R.graph.NewNode(R.common.Start(1)), R.graph.NewNode(R.common.Start(2)),
+ R.graph.NewNode(R.common.Start(3)), R.graph.NewNode(R.common.Start(4))};
+ Node* values[] = {R.jsgraph.Int32Constant(11), R.jsgraph.Int32Constant(22),
+ R.jsgraph.Int32Constant(33), R.jsgraph.Int32Constant(44)};
+ Node* effects[] = {
+ R.jsgraph.Float64Constant(123.4), R.jsgraph.Float64Constant(223.4),
+ R.jsgraph.Float64Constant(323.4), R.jsgraph.Float64Constant(423.4)};
+
+ for (int mask = 0; mask < (1 << (kSelectorSize - 1)); mask++) {
+ // Reduce a single merge with a given mask.
+ Node* merge = R.graph.NewNode(R.common.Merge(4), controls[0], controls[1],
+ controls[2], controls[3]);
+ Node* phi = R.graph.NewNode(R.common.Phi(kMachAnyTagged, 4), values[0],
+ values[1], values[2], values[3], merge);
+ Node* ephi = R.graph.NewNode(R.common.EffectPhi(4), effects[0], effects[1],
+ effects[2], effects[3], merge);
+
+ Node* phi_use =
+ R.graph.NewNode(R.common.Phi(kMachAnyTagged, 1), phi, R.start);
+ Node* ephi_use = R.graph.NewNode(R.common.EffectPhi(1), ephi, R.start);
+
+ Selector selector(mask);
+
+ for (int i = 0; i < kSelectorSize; i++) { // set up dead merge inputs.
+ if (!selector.is_selected(i)) merge->ReplaceInput(i, R.dead);
+ }
+
+ Node* result =
+ ControlReducer::ReduceMergeForTesting(&R.jsgraph, &R.common, merge);
+
+ int count = selector.count;
+ if (count == 0) {
+ // result should be dead.
+ CHECK_EQ(IrOpcode::kDead, result->opcode());
+ } else if (count == 1) {
+ // merge should be replaced with one of the controls.
+ CHECK_EQ(controls[selector.single_index()], result);
+ // Phis should have been directly replaced.
+ CHECK_EQ(values[selector.single_index()], phi_use->InputAt(0));
+ CHECK_EQ(effects[selector.single_index()], ephi_use->InputAt(0));
+ } else {
+ // Otherwise, nodes should be edited in place.
+ CHECK_EQ(merge, result);
+ selector.CheckNode(merge, IrOpcode::kMerge, controls, NULL);
+ selector.CheckNode(phi, IrOpcode::kPhi, values, merge);
+ selector.CheckNode(ephi, IrOpcode::kEffectPhi, effects, merge);
+ CHECK_EQ(phi, phi_use->InputAt(0));
+ CHECK_EQ(ephi, ephi_use->InputAt(0));
+ CHECK_EQ(count, phi->op()->InputCount());
+ CHECK_EQ(count + 1, phi->InputCount());
+ CHECK_EQ(count, OperatorProperties::GetEffectInputCount(ephi->op()));
+ CHECK_EQ(count + 1, ephi->InputCount());
+ }
+ }
+}
+
+
+TEST(CMergeReduce_edit_many_phis1) {
+ ControlReducerTester R;
+
+ const int kPhiCount = 10;
+ Node* phis[kPhiCount];
+
+ for (int i = 0; i < 3; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(3), R.start, R.start, R.start);
+ merge->ReplaceInput(i, R.dead);
+ for (int j = 0; j < kPhiCount; j++) {
+ phis[j] = R.graph.NewNode(R.common.Phi(kMachAnyTagged, 3), R.leaf[0],
+ R.leaf[1], R.leaf[2], merge);
+ }
+ R.ReduceMerge(merge, merge);
+ for (int j = 0; j < kPhiCount; j++) {
+ Node* phi = phis[j];
+ CHECK_EQ(IrOpcode::kPhi, phi->opcode());
+ CHECK_EQ(2, phi->op()->InputCount());
+ CHECK_EQ(3, phi->InputCount());
+ CHECK_EQ(R.leaf[i < 1 ? 1 : 0], phi->InputAt(0));
+ CHECK_EQ(R.leaf[i < 2 ? 2 : 1], phi->InputAt(1));
+ CHECK_EQ(merge, phi->InputAt(2));
+ }
+ }
+}
+
+
+TEST(CMergeReduce_simple_chain1) {
+ ControlReducerTester R;
+ for (int i = 0; i < 5; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(1), R.start);
+ for (int j = 0; j < i; j++) {
+ merge = R.graph.NewNode(R.common.Merge(1), merge);
+ }
+ R.ReduceMergeIterative(R.start, merge);
+ }
+}
+
+
+TEST(CMergeReduce_dead_chain1) {
+ ControlReducerTester R;
+ for (int i = 0; i < 5; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(1), R.dead);
+ for (int j = 0; j < i; j++) {
+ merge = R.graph.NewNode(R.common.Merge(1), merge);
+ }
+ Node* end = R.graph.NewNode(R.common.End(), merge);
+ R.graph.SetEnd(end);
+ R.ReduceGraph();
+ CHECK(merge->IsDead());
+ CHECK_EQ(NULL, end->InputAt(0)); // end dies.
+ }
+}
+
+
+TEST(CMergeReduce_dead_chain2) {
+ ControlReducerTester R;
+ for (int i = 0; i < 5; i++) {
+ Node* merge = R.graph.NewNode(R.common.Merge(1), R.start);
+ for (int j = 0; j < i; j++) {
+ merge = R.graph.NewNode(R.common.Merge(2), merge, R.dead);
+ }
+ R.ReduceMergeIterative(R.start, merge);
+ }
+}
+
+
+struct Branch {
+ Node* branch;
+ Node* if_true;
+ Node* if_false;
+
+ Branch(ControlReducerTester& R, Node* cond, Node* control = NULL) {
+ if (control == NULL) control = R.start;
+ branch = R.graph.NewNode(R.common.Branch(), cond, control);
+ if_true = R.graph.NewNode(R.common.IfTrue(), branch);
+ if_false = R.graph.NewNode(R.common.IfFalse(), branch);
+ }
+};
+
+
+struct Diamond {
+ Node* branch;
+ Node* if_true;
+ Node* if_false;
+ Node* merge;
+ Node* phi;
+
+ Diamond(ControlReducerTester& R, Node* cond) {
+ branch = R.graph.NewNode(R.common.Branch(), cond, R.start);
+ if_true = R.graph.NewNode(R.common.IfTrue(), branch);
+ if_false = R.graph.NewNode(R.common.IfFalse(), branch);
+ merge = R.graph.NewNode(R.common.Merge(2), if_true, if_false);
+ phi = NULL;
+ }
+
+ Diamond(ControlReducerTester& R, Node* cond, Node* tv, Node* fv) {
+ branch = R.graph.NewNode(R.common.Branch(), cond, R.start);
+ if_true = R.graph.NewNode(R.common.IfTrue(), branch);
+ if_false = R.graph.NewNode(R.common.IfFalse(), branch);
+ merge = R.graph.NewNode(R.common.Merge(2), if_true, if_false);
+ phi = R.graph.NewNode(R.common.Phi(kMachAnyTagged, 2), tv, fv, merge);
+ }
+
+ void chain(Diamond& that) { branch->ReplaceInput(1, that.merge); }
+
+ // Nest {this} into either the if_true or if_false branch of {that}.
+ void nest(Diamond& that, bool if_true) {
+ if (if_true) {
+ branch->ReplaceInput(1, that.if_true);
+ that.merge->ReplaceInput(0, merge);
+ } else {
+ branch->ReplaceInput(1, that.if_false);
+ that.merge->ReplaceInput(1, merge);
+ }
+ }
+};
+
+
+struct While {
+ Node* branch;
+ Node* if_true;
+ Node* exit;
+ Node* loop;
+
+ While(ControlReducerTester& R, Node* cond) {
+ loop = R.graph.NewNode(R.common.Loop(2), R.start, R.start);
+ branch = R.graph.NewNode(R.common.Branch(), cond, loop);
+ if_true = R.graph.NewNode(R.common.IfTrue(), branch);
+ exit = R.graph.NewNode(R.common.IfFalse(), branch);
+ loop->ReplaceInput(1, if_true);
+ }
+
+ void chain(Node* control) { loop->ReplaceInput(0, control); }
+};
+
+
+TEST(CBranchReduce_none1) {
+ ControlReducerTester R;
+ Diamond d(R, R.p0);
+ R.ReduceBranch(d.branch, d.branch);
+}
+
+
+TEST(CBranchReduce_none2) {
+ ControlReducerTester R;
+ Diamond d1(R, R.p0);
+ Diamond d2(R, R.p0);
+ d2.chain(d1);
+ R.ReduceBranch(d2.branch, d2.branch);
+}
+
+
+TEST(CBranchReduce_true) {
+ ControlReducerTester R;
+ Node* true_values[] = {
+ R.one, R.jsgraph.Int32Constant(2),
+ R.jsgraph.Int32Constant(0x7fffffff), R.jsgraph.Constant(1.0),
+ R.jsgraph.Constant(22.1), R.jsgraph.TrueConstant()};
+
+ for (size_t i = 0; i < arraysize(true_values); i++) {
+ Diamond d(R, true_values[i]);
+ Node* true_use = R.graph.NewNode(R.common.Merge(1), d.if_true);
+ Node* false_use = R.graph.NewNode(R.common.Merge(1), d.if_false);
+ R.ReduceBranch(R.start, d.branch);
+ CHECK_EQ(R.start, true_use->InputAt(0));
+ CHECK_EQ(IrOpcode::kDead, false_use->InputAt(0)->opcode());
+ CHECK(d.if_true->IsDead()); // replaced
+ CHECK(d.if_false->IsDead()); // replaced
+ }
+}
+
+
+TEST(CBranchReduce_false) {
+ ControlReducerTester R;
+ Node* false_values[] = {R.zero, R.jsgraph.Constant(0.0),
+ R.jsgraph.Constant(-0.0), R.jsgraph.FalseConstant()};
+
+ for (size_t i = 0; i < arraysize(false_values); i++) {
+ Diamond d(R, false_values[i]);
+ Node* true_use = R.graph.NewNode(R.common.Merge(1), d.if_true);
+ Node* false_use = R.graph.NewNode(R.common.Merge(1), d.if_false);
+ R.ReduceBranch(R.start, d.branch);
+ CHECK_EQ(R.start, false_use->InputAt(0));
+ CHECK_EQ(IrOpcode::kDead, true_use->InputAt(0)->opcode());
+ CHECK(d.if_true->IsDead()); // replaced
+ CHECK(d.if_false->IsDead()); // replaced
+ }
+}
+
+
+TEST(CDiamondReduce_true) {
+ ControlReducerTester R;
+ Diamond d1(R, R.one);
+ R.ReduceMergeIterative(R.start, d1.merge);
+}
+
+
+TEST(CDiamondReduce_false) {
+ ControlReducerTester R;
+ Diamond d2(R, R.zero);
+ R.ReduceMergeIterative(R.start, d2.merge);
+}
+
+
+TEST(CChainedDiamondsReduce_true_false) {
+ ControlReducerTester R;
+ Diamond d1(R, R.one);
+ Diamond d2(R, R.zero);
+ d2.chain(d1);
+
+ R.ReduceMergeIterative(R.start, d2.merge);
+}
+
+
+TEST(CChainedDiamondsReduce_x_false) {
+ ControlReducerTester R;
+ Diamond d1(R, R.p0);
+ Diamond d2(R, R.zero);
+ d2.chain(d1);
+
+ R.ReduceMergeIterative(d1.merge, d2.merge);
+}
+
+
+TEST(CChainedDiamondsReduce_false_x) {
+ ControlReducerTester R;
+ Diamond d1(R, R.zero);
+ Diamond d2(R, R.p0);
+ d2.chain(d1);
+
+ R.ReduceMergeIterative(d2.merge, d2.merge);
+ CheckInputs(d2.branch, R.p0, R.start);
+}
+
+
+TEST(CChainedDiamondsReduce_phi1) {
+ ControlReducerTester R;
+ Diamond d1(R, R.zero, R.one, R.zero); // foldable branch, phi.
+ Diamond d2(R, d1.phi);
+ d2.chain(d1);
+
+ R.ReduceMergeIterative(R.start, d2.merge);
+}
+
+
+TEST(CChainedDiamondsReduce_phi2) {
+ ControlReducerTester R;
+ Diamond d1(R, R.p0, R.one, R.one); // redundant phi.
+ Diamond d2(R, d1.phi);
+ d2.chain(d1);
+
+ R.ReduceMergeIterative(d1.merge, d2.merge);
+}
+
+
+TEST(CNestedDiamondsReduce_true_true_false) {
+ ControlReducerTester R;
+ Diamond d1(R, R.one);
+ Diamond d2(R, R.zero);
+ d2.nest(d1, true);
+
+ R.ReduceMergeIterative(R.start, d1.merge);
+}
+
+
+TEST(CNestedDiamondsReduce_false_true_false) {
+ ControlReducerTester R;
+ Diamond d1(R, R.one);
+ Diamond d2(R, R.zero);
+ d2.nest(d1, false);
+
+ R.ReduceMergeIterative(R.start, d1.merge);
+}
+
+
+TEST(CNestedDiamonds_xyz) {
+ ControlReducerTester R;
+
+ for (int a = 0; a < 2; a++) {
+ for (int b = 0; b < 2; b++) {
+ for (int c = 0; c < 2; c++) {
+ Diamond d1(R, R.jsgraph.Int32Constant(a));
+ Diamond d2(R, R.jsgraph.Int32Constant(b));
+ d2.nest(d1, c);
+
+ R.ReduceMergeIterative(R.start, d1.merge);
+ }
+ }
+ }
+}
+
+
+TEST(CDeadLoop1) {
+ ControlReducerTester R;
+
+ Node* loop = R.graph.NewNode(R.common.Loop(1), R.start);
+ Branch b(R, R.p0, loop);
+ loop->ReplaceInput(0, b.if_true); // loop is not connected to start.
+ Node* merge = R.graph.NewNode(R.common.Merge(2), R.start, b.if_false);
+ R.ReduceMergeIterative(R.start, merge);
+ CHECK(b.if_true->IsDead());
+ CHECK(b.if_false->IsDead());
+}
+
+
+TEST(CDeadLoop2) {
+ ControlReducerTester R;
+
+ While w(R, R.p0);
+ Diamond d(R, R.zero);
+ // if (0) { while (p0) ; } else { }
+ w.branch->ReplaceInput(1, d.if_true);
+ d.merge->ReplaceInput(0, w.exit);
+
+ R.ReduceMergeIterative(R.start, d.merge);
+ CHECK(d.if_true->IsDead());
+ CHECK(d.if_false->IsDead());
+}
+
+
+TEST(CNonTermLoop1) {
+ ControlReducerTester R;
+ Node* loop =
+ R.SetSelfReferences(R.graph.NewNode(R.common.Loop(2), R.start, R.self));
+ R.ReduceGraph();
+ Node* end = R.graph.end();
+ CheckLoop(loop, R.start, loop);
+ Node* merge = end->InputAt(0);
+ CheckMerge(merge, R.start, loop);
+}
+
+
+TEST(CNonTermLoop2) {
+ ControlReducerTester R;
+ Diamond d(R, R.p0);
+ Node* loop = R.SetSelfReferences(
+ R.graph.NewNode(R.common.Loop(2), d.if_false, R.self));
+ d.merge->ReplaceInput(1, R.dead);
+ Node* end = R.graph.end();
+ end->ReplaceInput(0, d.merge);
+ R.ReduceGraph();
+ CHECK_EQ(end, R.graph.end());
+ CheckLoop(loop, d.if_false, loop);
+ Node* merge = end->InputAt(0);
+ CheckMerge(merge, d.if_true, loop);
+}
+
+
+TEST(NonTermLoop3) {
+ ControlReducerTester R;
+ Node* loop = R.graph.NewNode(R.common.Loop(2), R.start, R.start);
+ Branch b(R, R.one, loop);
+ loop->ReplaceInput(1, b.if_true);
+ Node* end = R.graph.end();
+ end->ReplaceInput(0, b.if_false);
+
+ R.ReduceGraph();
+
+ CHECK_EQ(end, R.graph.end());
+ CheckInputs(end, loop);
+ CheckInputs(loop, R.start, loop);
+}
+
+
+TEST(CNonTermLoop_terminate1) {
+ ControlReducerTester R;
+ Node* loop = R.graph.NewNode(R.common.Loop(2), R.start, R.start);
+ Node* effect = R.SetSelfReferences(
+ R.graph.NewNode(R.common.EffectPhi(2), R.start, R.self, loop));
+ Branch b(R, R.one, loop);
+ loop->ReplaceInput(1, b.if_true);
+ Node* end = R.graph.end();
+ end->ReplaceInput(0, b.if_false);
+
+ R.ReduceGraph();
+
+ CHECK_EQ(end, R.graph.end());
+ CheckLoop(loop, R.start, loop);
+ Node* terminate = end->InputAt(0);
+ CHECK_EQ(IrOpcode::kTerminate, terminate->opcode());
+ CHECK_EQ(2, terminate->InputCount());
+ CHECK_EQ(1, OperatorProperties::GetEffectInputCount(terminate->op()));
+ CHECK_EQ(1, OperatorProperties::GetControlInputCount(terminate->op()));
+ CheckInputs(terminate, effect, loop);
+}
+
+
+TEST(CNonTermLoop_terminate2) {
+ ControlReducerTester R;
+ Node* loop = R.graph.NewNode(R.common.Loop(2), R.start, R.start);
+ Node* effect1 = R.SetSelfReferences(
+ R.graph.NewNode(R.common.EffectPhi(2), R.start, R.self, loop));
+ Node* effect2 = R.SetSelfReferences(
+ R.graph.NewNode(R.common.EffectPhi(2), R.start, R.self, loop));
+ Branch b(R, R.one, loop);
+ loop->ReplaceInput(1, b.if_true);
+ Node* end = R.graph.end();
+ end->ReplaceInput(0, b.if_false);
+
+ R.ReduceGraph();
+
+ CheckLoop(loop, R.start, loop);
+ CHECK_EQ(end, R.graph.end());
+ Node* terminate = end->InputAt(0);
+ CHECK_EQ(IrOpcode::kTerminate, terminate->opcode());
+ CHECK_EQ(3, terminate->InputCount());
+ CHECK_EQ(2, OperatorProperties::GetEffectInputCount(terminate->op()));
+ CHECK_EQ(1, OperatorProperties::GetControlInputCount(terminate->op()));
+ Node* e0 = terminate->InputAt(0);
+ Node* e1 = terminate->InputAt(1);
+ CHECK(e0 == effect1 || e1 == effect1);
+ CHECK(e0 == effect2 || e1 == effect2);
+ CHECK_EQ(loop, terminate->InputAt(2));
+}
+
+
+TEST(CNonTermLoop_terminate_m1) {
+ ControlReducerTester R;
+ Node* loop =
+ R.SetSelfReferences(R.graph.NewNode(R.common.Loop(2), R.start, R.self));
+ Node* effect = R.SetSelfReferences(
+ R.graph.NewNode(R.common.EffectPhi(2), R.start, R.self, loop));
+ R.ReduceGraph();
+ Node* end = R.graph.end();
+ CHECK_EQ(R.start, loop->InputAt(0));
+ CHECK_EQ(loop, loop->InputAt(1));
+ Node* merge = end->InputAt(0);
+ CHECK_EQ(IrOpcode::kMerge, merge->opcode());
+ CHECK_EQ(2, merge->InputCount());
+ CHECK_EQ(2, OperatorProperties::GetControlInputCount(merge->op()));
+ CHECK_EQ(R.start, merge->InputAt(0));
+
+ Node* terminate = merge->InputAt(1);
+ CHECK_EQ(IrOpcode::kTerminate, terminate->opcode());
+ CHECK_EQ(2, terminate->InputCount());
+ CHECK_EQ(1, OperatorProperties::GetEffectInputCount(terminate->op()));
+ CHECK_EQ(1, OperatorProperties::GetControlInputCount(terminate->op()));
+ CHECK_EQ(effect, terminate->InputAt(0));
+ CHECK_EQ(loop, terminate->InputAt(1));
+}
+
+
+TEST(CNonTermLoop_big1) {
+ ControlReducerTester R;
+ Branch b1(R, R.p0);
+ Node* rt = R.graph.NewNode(R.common.Return(), R.one, R.start, b1.if_true);
+
+ Branch b2(R, R.p0, b1.if_false);
+ Node* rf = R.graph.NewNode(R.common.Return(), R.zero, R.start, b2.if_true);
+ Node* loop = R.SetSelfReferences(
+ R.graph.NewNode(R.common.Loop(2), b2.if_false, R.self));
+ Node* merge = R.graph.NewNode(R.common.Merge(2), rt, rf);
+ R.end->ReplaceInput(0, merge);
+
+ R.ReduceGraph();
+
+ CheckInputs(R.end, merge);
+ CheckInputs(merge, rt, rf, loop);
+ CheckInputs(loop, b2.if_false, loop);
+}
+
+
+TEST(CNonTermLoop_big2) {
+ ControlReducerTester R;
+ Branch b1(R, R.p0);
+ Node* rt = R.graph.NewNode(R.common.Return(), R.one, R.start, b1.if_true);
+
+ Branch b2(R, R.zero, b1.if_false);
+ Node* rf = R.graph.NewNode(R.common.Return(), R.zero, R.start, b2.if_true);
+ Node* loop = R.SetSelfReferences(
+ R.graph.NewNode(R.common.Loop(2), b2.if_false, R.self));
+ Node* merge = R.graph.NewNode(R.common.Merge(2), rt, rf);
+ R.end->ReplaceInput(0, merge);
+
+ R.ReduceGraph();
+
+ Node* new_merge = R.end->InputAt(0); // old merge was reduced.
+ CHECK_NE(merge, new_merge);
+ CheckInputs(new_merge, rt, loop);
+ CheckInputs(loop, b1.if_false, loop);
+ CHECK(merge->IsDead());
+ CHECK(rf->IsDead());
+ CHECK(b2.if_true->IsDead());
+}
+
+
+TEST(Return1) {
+ ControlReducerTester R;
+ Node* ret = R.Return(R.one, R.start, R.start);
+ R.ReduceGraph();
+ CheckInputs(R.graph.end(), ret);
+ CheckInputs(ret, R.one, R.start, R.start);
+}
+
+
+TEST(Return2) {
+ ControlReducerTester R;
+ Diamond d(R, R.one);
+ Node* ret = R.Return(R.half, R.start, d.merge);
+ R.ReduceGraph();
+ CHECK(d.branch->IsDead());
+ CHECK(d.if_true->IsDead());
+ CHECK(d.if_false->IsDead());
+ CHECK(d.merge->IsDead());
+
+ CheckInputs(R.graph.end(), ret);
+ CheckInputs(ret, R.half, R.start, R.start);
+}
+
+
+TEST(Return_true1) {
+ ControlReducerTester R;
+ Diamond d(R, R.one, R.half, R.zero);
+ Node* ret = R.Return(d.phi, R.start, d.merge);
+ R.ReduceGraph();
+ CHECK(d.branch->IsDead());
+ CHECK(d.if_true->IsDead());
+ CHECK(d.if_false->IsDead());
+ CHECK(d.merge->IsDead());
+ CHECK(d.phi->IsDead());
+
+ CheckInputs(R.graph.end(), ret);
+ CheckInputs(ret, R.half, R.start, R.start);
+}
+
+
+TEST(Return_false1) {
+ ControlReducerTester R;
+ Diamond d(R, R.zero, R.one, R.half);
+ Node* ret = R.Return(d.phi, R.start, d.merge);
+ R.ReduceGraph();
+ CHECK(d.branch->IsDead());
+ CHECK(d.if_true->IsDead());
+ CHECK(d.if_false->IsDead());
+ CHECK(d.merge->IsDead());
+ CHECK(d.phi->IsDead());
+
+ CheckInputs(R.graph.end(), ret);
+ CheckInputs(ret, R.half, R.start, R.start);
+}
+
+
+void CheckDeadDiamond(Diamond& d) {
+ CHECK(d.branch->IsDead());
+ CHECK(d.if_true->IsDead());
+ CHECK(d.if_false->IsDead());
+ CHECK(d.merge->IsDead());
+ if (d.phi != NULL) CHECK(d.phi->IsDead());
+}
+
+
+void CheckLiveDiamond(Diamond& d, bool live_phi = true) {
+ CheckInputs(d.merge, d.if_true, d.if_false);
+ CheckInputs(d.if_true, d.branch);
+ CheckInputs(d.if_false, d.branch);
+ if (d.phi != NULL) {
+ if (live_phi) {
+ CHECK_EQ(3, d.phi->InputCount());
+ CHECK_EQ(d.merge, d.phi->InputAt(2));
+ } else {
+ CHECK(d.phi->IsDead());
+ }
+ }
+}
+
+
+TEST(Return_effect1) {
+ ControlReducerTester R;
+ Diamond d(R, R.one);
+ Node* e1 = R.jsgraph.Float64Constant(-100.1);
+ Node* e2 = R.jsgraph.Float64Constant(+100.1);
+ Node* effect = R.graph.NewNode(R.common.EffectPhi(2), e1, e2, d.merge);
+ Node* ret = R.Return(R.p0, effect, d.merge);
+ R.ReduceGraph();
+ CheckDeadDiamond(d);
+ CHECK(effect->IsDead());
+
+ CheckInputs(R.graph.end(), ret);
+ CheckInputs(ret, R.p0, e1, R.start);
+}
+
+
+TEST(Return_nested_diamonds1) {
+ ControlReducerTester R;
+ Diamond d1(R, R.p0, R.one, R.zero);
+ Diamond d2(R, R.p0);
+ Diamond d3(R, R.p0);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // nothing should happen.
+
+ CheckInputs(ret, d1.phi, R.start, d1.merge);
+ CheckInputs(d1.phi, R.one, R.zero, d1.merge);
+ CheckInputs(d1.merge, d2.merge, d3.merge);
+ CheckLiveDiamond(d2);
+ CheckLiveDiamond(d3);
+}
+
+
+TEST(Return_nested_diamonds_true1) {
+ ControlReducerTester R;
+ Diamond d1(R, R.one, R.one, R.zero);
+ Diamond d2(R, R.p0);
+ Diamond d3(R, R.p0);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // d1 gets folded true.
+
+ CheckInputs(ret, R.one, R.start, d2.merge);
+ CheckInputs(d2.branch, R.p0, R.start);
+ CheckDeadDiamond(d1);
+ CheckLiveDiamond(d2);
+ CheckDeadDiamond(d3);
+}
+
+
+TEST(Return_nested_diamonds_false1) {
+ ControlReducerTester R;
+ Diamond d1(R, R.zero, R.one, R.zero);
+ Diamond d2(R, R.p0);
+ Diamond d3(R, R.p0);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // d1 gets folded false.
+
+ CheckInputs(ret, R.zero, R.start, d3.merge);
+ CheckInputs(d3.branch, R.p0, R.start);
+ CheckDeadDiamond(d1);
+ CheckDeadDiamond(d2);
+ CheckLiveDiamond(d3);
+}
+
+
+TEST(Return_nested_diamonds_true_true1) {
+ ControlReducerTester R;
+ Diamond d1(R, R.one, R.one, R.zero);
+ Diamond d2(R, R.one);
+ Diamond d3(R, R.p0);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // d1 and d2 both get folded true.
+
+ CheckInputs(ret, R.one, R.start, R.start);
+ CheckDeadDiamond(d1);
+ CheckDeadDiamond(d2);
+ CheckDeadDiamond(d3);
+}
+
+
+TEST(Return_nested_diamonds_true_false1) {
+ ControlReducerTester R;
+ Diamond d1(R, R.one, R.one, R.zero);
+ Diamond d2(R, R.zero);
+ Diamond d3(R, R.p0);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // d1 gets folded true and d2 gets folded false.
+
+ CheckInputs(ret, R.one, R.start, R.start);
+ CheckDeadDiamond(d1);
+ CheckDeadDiamond(d2);
+ CheckDeadDiamond(d3);
+}
+
+
+TEST(Return_nested_diamonds2) {
+ ControlReducerTester R;
+ Node* x2 = R.jsgraph.Float64Constant(11.1);
+ Node* y2 = R.jsgraph.Float64Constant(22.2);
+ Node* x3 = R.jsgraph.Float64Constant(33.3);
+ Node* y3 = R.jsgraph.Float64Constant(44.4);
+
+ Diamond d2(R, R.p0, x2, y2);
+ Diamond d3(R, R.p0, x3, y3);
+ Diamond d1(R, R.p0, d2.phi, d3.phi);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // nothing should happen.
+
+ CheckInputs(ret, d1.phi, R.start, d1.merge);
+ CheckInputs(d1.phi, d2.phi, d3.phi, d1.merge);
+ CheckInputs(d1.merge, d2.merge, d3.merge);
+ CheckLiveDiamond(d2);
+ CheckLiveDiamond(d3);
+}
+
+
+TEST(Return_nested_diamonds_true2) {
+ ControlReducerTester R;
+ Node* x2 = R.jsgraph.Float64Constant(11.1);
+ Node* y2 = R.jsgraph.Float64Constant(22.2);
+ Node* x3 = R.jsgraph.Float64Constant(33.3);
+ Node* y3 = R.jsgraph.Float64Constant(44.4);
+
+ Diamond d2(R, R.p0, x2, y2);
+ Diamond d3(R, R.p0, x3, y3);
+ Diamond d1(R, R.one, d2.phi, d3.phi);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // d1 gets folded true.
+
+ CheckInputs(ret, d2.phi, R.start, d2.merge);
+ CheckInputs(d2.branch, R.p0, R.start);
+ CheckDeadDiamond(d1);
+ CheckLiveDiamond(d2);
+ CheckDeadDiamond(d3);
+}
+
+
+TEST(Return_nested_diamonds_true_true2) {
+ ControlReducerTester R;
+ Node* x2 = R.jsgraph.Float64Constant(11.1);
+ Node* y2 = R.jsgraph.Float64Constant(22.2);
+ Node* x3 = R.jsgraph.Float64Constant(33.3);
+ Node* y3 = R.jsgraph.Float64Constant(44.4);
+
+ Diamond d2(R, R.one, x2, y2);
+ Diamond d3(R, R.p0, x3, y3);
+ Diamond d1(R, R.one, d2.phi, d3.phi);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // d1 gets folded true.
+
+ CheckInputs(ret, x2, R.start, R.start);
+ CheckDeadDiamond(d1);
+ CheckDeadDiamond(d2);
+ CheckDeadDiamond(d3);
+}
+
+
+TEST(Return_nested_diamonds_true_false2) {
+ ControlReducerTester R;
+ Node* x2 = R.jsgraph.Float64Constant(11.1);
+ Node* y2 = R.jsgraph.Float64Constant(22.2);
+ Node* x3 = R.jsgraph.Float64Constant(33.3);
+ Node* y3 = R.jsgraph.Float64Constant(44.4);
+
+ Diamond d2(R, R.zero, x2, y2);
+ Diamond d3(R, R.p0, x3, y3);
+ Diamond d1(R, R.one, d2.phi, d3.phi);
+
+ d2.nest(d1, true);
+ d3.nest(d1, false);
+
+ Node* ret = R.Return(d1.phi, R.start, d1.merge);
+
+ R.ReduceGraph(); // d1 gets folded true.
+
+ CheckInputs(ret, y2, R.start, R.start);
+ CheckDeadDiamond(d1);
+ CheckDeadDiamond(d2);
+ CheckDeadDiamond(d3);
+}
diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc
index fc48ca2..d61f34c 100644
--- a/test/cctest/compiler/test-instruction.cc
+++ b/test/cctest/compiler/test-instruction.cc
@@ -31,7 +31,7 @@
graph(zone()),
schedule(zone()),
info(static_cast<HydrogenCodeStub*>(NULL), main_isolate()),
- linkage(&info),
+ linkage(zone(), &info),
common(zone()),
code(NULL) {}
diff --git a/test/cctest/compiler/test-linkage.cc b/test/cctest/compiler/test-linkage.cc
index ff65d6e..923f7fc 100644
--- a/test/cctest/compiler/test-linkage.cc
+++ b/test/cctest/compiler/test-linkage.cc
@@ -45,7 +45,7 @@
InitializedHandleScope handles;
Handle<JSFunction> function = Compile("a + b");
CompilationInfoWithZone info(function);
- Linkage linkage(&info);
+ Linkage linkage(info.zone(), &info);
}
@@ -60,7 +60,7 @@
Handle<JSFunction> function = v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(CompileRun(sources[i])));
CompilationInfoWithZone info(function);
- Linkage linkage(&info);
+ Linkage linkage(info.zone(), &info);
CallDescriptor* descriptor = linkage.GetIncomingDescriptor();
CHECK_NE(NULL, descriptor);
@@ -76,7 +76,7 @@
TEST(TestLinkageCodeStubIncoming) {
Isolate* isolate = CcTest::InitIsolateOnce();
CompilationInfoWithZone info(static_cast<HydrogenCodeStub*>(NULL), isolate);
- Linkage linkage(&info);
+ Linkage linkage(info.zone(), &info);
// TODO(titzer): test linkage creation with a bonafide code stub.
// this just checks current behavior.
CHECK_EQ(NULL, linkage.GetIncomingDescriptor());
@@ -87,7 +87,7 @@
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + c");
CompilationInfoWithZone info(function);
- Linkage linkage(&info);
+ Linkage linkage(info.zone(), &info);
for (int i = 0; i < 32; i++) {
CallDescriptor* descriptor = linkage.GetJSCallDescriptor(i);
diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc
index 3d1c255..47acbe1 100644
--- a/test/cctest/compiler/test-simplified-lowering.cc
+++ b/test/cctest/compiler/test-simplified-lowering.cc
@@ -61,7 +61,7 @@
Zone* zone = this->zone();
CompilationInfo info(zone->isolate(), zone);
Linkage linkage(
- &info, Linkage::GetSimplifiedCDescriptor(zone, this->machine_sig_));
+ zone, Linkage::GetSimplifiedCDescriptor(zone, this->machine_sig_));
ChangeLowering lowering(&jsgraph, &linkage);
GraphReducer reducer(this->graph());
reducer.AddReducer(&lowering);
diff --git a/test/cctest/test-feedback-vector.cc b/test/cctest/test-feedback-vector.cc
index 517d19f..79c6ea2 100644
--- a/test/cctest/test-feedback-vector.cc
+++ b/test/cctest/test-feedback-vector.cc
@@ -45,15 +45,73 @@
CHECK_EQ(3, vector->Slots());
CHECK_EQ(5, vector->ICSlots());
+ int metadata_length = vector->ic_metadata_length();
+ if (!FLAG_vector_ics) {
+ CHECK_EQ(0, metadata_length);
+ } else {
+ CHECK(metadata_length > 0);
+ }
+
int index = vector->GetIndex(FeedbackVectorSlot(0));
- CHECK_EQ(TypeFeedbackVector::kReservedIndexCount, index);
+ CHECK_EQ(TypeFeedbackVector::kReservedIndexCount + metadata_length, index);
CHECK(FeedbackVectorSlot(0) == vector->ToSlot(index));
index = vector->GetIndex(FeedbackVectorICSlot(0));
- CHECK_EQ(index, TypeFeedbackVector::kReservedIndexCount + 3);
+ CHECK_EQ(index,
+ TypeFeedbackVector::kReservedIndexCount + metadata_length + 3);
CHECK(FeedbackVectorICSlot(0) == vector->ToICSlot(index));
- CHECK_EQ(TypeFeedbackVector::kReservedIndexCount + 3 + 5, vector->length());
+ CHECK_EQ(TypeFeedbackVector::kReservedIndexCount + metadata_length + 3 + 5,
+ vector->length());
+}
+
+
+// IC slots need an encoding to recognize what is in there.
+TEST(VectorICMetadata) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ if (!FLAG_vector_ics) {
+ // If FLAG_vector_ics is false, we only store CALL_ICs in the vector, so
+ // there is no need for metadata to describe the slots.
+ return;
+ }
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+
+ Handle<TypeFeedbackVector> vector =
+ factory->NewTypeFeedbackVector(10, 3 * 10);
+ CHECK_EQ(10, vector->Slots());
+ CHECK_EQ(3 * 10, vector->ICSlots());
+
+ // Set metadata.
+ for (int i = 0; i < 30; i++) {
+ Code::Kind kind;
+ if (i % 3 == 0)
+ kind = Code::CALL_IC;
+ else if (i % 3 == 1)
+ kind = Code::LOAD_IC;
+ else
+ kind = Code::KEYED_LOAD_IC;
+ vector->SetKind(FeedbackVectorICSlot(i), kind);
+ }
+
+ // Meanwhile set some feedback values and type feedback values to
+ // verify the data structure remains intact.
+ vector->change_ic_with_type_info_count(100);
+ vector->change_ic_generic_count(3333);
+ vector->Set(FeedbackVectorSlot(0), *vector);
+
+ // Verify the metadata remains the same.
+ for (int i = 0; i < 30; i++) {
+ Code::Kind kind = vector->GetKind(FeedbackVectorICSlot(i));
+ if (i % 3 == 0) {
+ CHECK_EQ(Code::CALL_IC, kind);
+ } else if (i % 3 == 1) {
+ CHECK_EQ(Code::LOAD_IC, kind);
+ } else {
+ CHECK_EQ(Code::KEYED_LOAD_IC, kind);
+ }
+ }
}
@@ -129,11 +187,14 @@
CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
CHECK_EQ(0, feedback_vector->ic_generic_count());
- CHECK(feedback_vector->Get(FeedbackVectorICSlot(0))->IsAllocationSite());
+ int ic_slot = FLAG_vector_ics ? 1 : 0;
+ CHECK(
+ feedback_vector->Get(FeedbackVectorICSlot(ic_slot))->IsAllocationSite());
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
CHECK_EQ(0, feedback_vector->ic_generic_count());
- CHECK(feedback_vector->Get(FeedbackVectorICSlot(0))->IsAllocationSite());
+ CHECK(
+ feedback_vector->Get(FeedbackVectorICSlot(ic_slot))->IsAllocationSite());
}
}
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index eef43dc..ac121e0 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -2848,7 +2848,6 @@
"root = new F");
root = GetByName("root");
AddPropertyTo(2, root, "funny");
- CcTest::heap()->CollectGarbage(NEW_SPACE);
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -2875,7 +2874,6 @@
root = GetByName("root");
AddPropertyTo(2, root, "funny");
- CcTest::heap()->CollectGarbage(NEW_SPACE);
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 0c0e522..6a0e24a 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -826,19 +826,25 @@
}
-TEST(SerializeToplevelLargeString) {
+TEST(SerializeToplevelLargeStrings) {
FLAG_serialize_toplevel = true;
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
+ Factory* f = isolate->factory();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
v8::HandleScope scope(CcTest::isolate());
- Vector<const uint8_t> source = ConstructSource(
+ Vector<const uint8_t> source_s = ConstructSource(
STATIC_CHAR_VECTOR("var s = \""), STATIC_CHAR_VECTOR("abcdef"),
- STATIC_CHAR_VECTOR("\"; s"), 1000000);
+ STATIC_CHAR_VECTOR("\";"), 1000000);
+ Vector<const uint8_t> source_t = ConstructSource(
+ STATIC_CHAR_VECTOR("var t = \""), STATIC_CHAR_VECTOR("uvwxyz"),
+ STATIC_CHAR_VECTOR("\"; s + t"), 999999);
Handle<String> source_str =
- isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
+ f->NewConsString(f->NewStringFromOneByte(source_s).ToHandleChecked(),
+ f->NewStringFromOneByte(source_t).ToHandleChecked())
+ .ToHandleChecked();
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
@@ -865,13 +871,19 @@
Handle<Object> copy_result =
Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
- CHECK_EQ(6 * 1000000, Handle<String>::cast(copy_result)->length());
- CHECK(isolate->heap()->InSpace(HeapObject::cast(*copy_result), LO_SPACE));
+ CHECK_EQ(6 * 1999999, Handle<String>::cast(copy_result)->length());
+ Handle<Object> property = JSObject::GetDataProperty(
+ isolate->global_object(), f->NewStringFromAsciiChecked("s"));
+ CHECK(isolate->heap()->InSpace(HeapObject::cast(*property), LO_SPACE));
+ property = JSObject::GetDataProperty(isolate->global_object(),
+ f->NewStringFromAsciiChecked("t"));
+ CHECK(isolate->heap()->InSpace(HeapObject::cast(*property), LO_SPACE));
// Make sure we do not serialize too much, e.g. include the source string.
- CHECK_LT(cache->length(), 7000000);
+ CHECK_LT(cache->length(), 13000000);
delete cache;
- source.Dispose();
+ source_s.Dispose();
+ source_t.Dispose();
}
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index 0c1e041..8c5ad0e 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -76,6 +76,46 @@
}
+TEST(BitSetComputer) {
+ typedef BitSetComputer<bool, 1, kSmiValueSize, uint32_t> BoolComputer;
+ CHECK_EQ(0, BoolComputer::word_count(0));
+ CHECK_EQ(1, BoolComputer::word_count(8));
+ CHECK_EQ(2, BoolComputer::word_count(50));
+ CHECK_EQ(0, BoolComputer::index(0, 8));
+ CHECK_EQ(100, BoolComputer::index(100, 8));
+ CHECK_EQ(1, BoolComputer::index(0, 40));
+ uint32_t data = 0;
+ data = BoolComputer::encode(data, 1, true);
+ data = BoolComputer::encode(data, 4, true);
+ CHECK_EQ(true, BoolComputer::decode(data, 1));
+ CHECK_EQ(true, BoolComputer::decode(data, 4));
+ CHECK_EQ(false, BoolComputer::decode(data, 0));
+ CHECK_EQ(false, BoolComputer::decode(data, 2));
+ CHECK_EQ(false, BoolComputer::decode(data, 3));
+
+ // Lets store 2 bits per item with 3000 items and verify the values are
+ // correct.
+ typedef BitSetComputer<unsigned char, 2, 8, unsigned char> TwoBits;
+ const int words = 750;
+ CHECK_EQ(words, TwoBits::word_count(3000));
+ const int offset = 10;
+ Vector<unsigned char> buffer = Vector<unsigned char>::New(offset + words);
+ memset(buffer.start(), 0, sizeof(unsigned char) * buffer.length());
+ for (int i = 0; i < words; i++) {
+ const int index = TwoBits::index(offset, i);
+ unsigned char data = buffer[index];
+ data = TwoBits::encode(data, i, i % 4);
+ buffer[index] = data;
+ }
+
+ for (int i = 0; i < words; i++) {
+ const int index = TwoBits::index(offset, i);
+ unsigned char data = buffer[index];
+ CHECK_EQ(i % 4, TwoBits::decode(data, i));
+ }
+}
+
+
TEST(SNPrintF) {
// Make sure that strings that are truncated because of too small
// buffers are zero-terminated anyway.
diff --git a/test/mjsunit/regress/regress-3643.js b/test/mjsunit/regress/regress-3643.js
index cc61a1c..bbc94fd 100644
--- a/test/mjsunit/regress/regress-3643.js
+++ b/test/mjsunit/regress/regress-3643.js
@@ -2,20 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var a = [1, 2, 3];
-Object.defineProperty(a, '1', {
- get: function() { delete this[1]; return undefined; },
- configurable: true
-});
+function newArrayWithGetter() {
+ var arr = [1, 2, 3];
+ Object.defineProperty(arr, '1', {
+ get: function() { delete this[1]; return undefined; },
+ configurable: true
+ });
+ return arr;
+}
+
+var a = newArrayWithGetter();
var s = a.slice(1);
assertTrue('0' in s);
// Sparse case should hit the same code as above due to presence of the getter.
-a = [1, 2, 3];
+a = newArrayWithGetter();
a[0xffff] = 4;
-Object.defineProperty(a, '1', {
- get: function() { delete this[1]; return undefined; },
- configurable: true
-});
s = a.slice(1);
assertTrue('0' in s);
+
+a = newArrayWithGetter();
+a.shift();
+assertTrue('0' in a);
+
+a = newArrayWithGetter();
+a.unshift(0);
+assertTrue('2' in a);
diff --git a/test/unittests/compiler/change-lowering-unittest.cc b/test/unittests/compiler/change-lowering-unittest.cc
index 3f77a8f..ed57513 100644
--- a/test/unittests/compiler/change-lowering-unittest.cc
+++ b/test/unittests/compiler/change-lowering-unittest.cc
@@ -69,7 +69,7 @@
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(graph(), common(), &javascript, &machine);
CompilationInfo info(isolate(), zone());
- Linkage linkage(&info);
+ Linkage linkage(zone(), &info);
ChangeLowering reducer(&jsgraph, &linkage);
return reducer.Reduce(node);
}
diff --git a/test/unittests/compiler/instruction-selector-unittest.cc b/test/unittests/compiler/instruction-selector-unittest.cc
index defc953..0c5cdc5 100644
--- a/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/test/unittests/compiler/instruction-selector-unittest.cc
@@ -36,8 +36,7 @@
}
EXPECT_NE(0, graph()->NodeCount());
int initial_node_count = graph()->NodeCount();
- CompilationInfo info(test_->isolate(), test_->zone());
- Linkage linkage(&info, call_descriptor());
+ Linkage linkage(test_->zone(), call_descriptor());
InstructionSequence sequence(test_->zone(), graph(), schedule);
SourcePositionTable source_position_table(graph());
InstructionSelector selector(test_->zone(), &linkage, &sequence, schedule,