Version 3.21.6
Fixed inlined 'throw' statements interfering with live range computation. (issue 2843)
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@16410 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 16b5126..c5c6f36 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2013-08-29: Version 3.21.6
+
+ Fixed inlined 'throw' statements interfering with live range
+ computation. (issue 2843)
+
+ Performance and stability improvements on all platforms.
+
+
2013-08-28: Version 3.21.5
Fixed compilation with recent MinGW64 versions. (issue 2300)
diff --git a/include/v8.h b/include/v8.h
index c29bba6..51bd362 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -5416,7 +5416,7 @@
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 133;
+ static const int kEmptyStringRootIndex = 134;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5427,7 +5427,7 @@
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xb1;
+ static const int kJSObjectType = 0xb2;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 5978fad..f9e21f7 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1872,13 +1872,6 @@
}
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), r0);
return MarkAsCall(new(zone()) LThrow(value), instr);
@@ -2027,9 +2020,9 @@
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 90fb117..781dcf4 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -62,12 +62,12 @@
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckNonSmi) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
@@ -2338,16 +2338,16 @@
};
-class LCheckFunction V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index e34e15b..9cb92e8 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -5131,18 +5131,18 @@
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
+ Handle<HeapObject> object = instr->hydrogen()->object();
AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*target)) {
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
- __ cmp(reg, Operand(target));
+ __ cmp(reg, Operand(object));
}
DeoptimizeIf(ne, instr->environment());
}
diff --git a/src/cpu.cc b/src/cpu.cc
index 1bae001..26eca61 100644
--- a/src/cpu.cc
+++ b/src/cpu.cc
@@ -27,46 +27,54 @@
#include "cpu.h"
+#if V8_CC_MSVC
+#include <intrin.h> // __cpuid()
+#endif
+#if V8_OS_POSIX
+#include <unistd.h> // sysconf()
+#endif
+
#include <algorithm>
#include <cctype>
+#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "checks.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
namespace v8 {
namespace internal {
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-#if V8_CC_MSVC
-
-#include <intrin.h> // NOLINT
-
-#elif defined(__i386__) && defined(__pic__)
+// Define __cpuid() for non-MSVC compilers.
+#if !V8_CC_MSVC
static V8_INLINE(void __cpuid(int cpu_info[4], int info_type)) {
+#if defined(__i386__) && defined(__pic__)
+ // Make sure to preserve ebx, which contains the pointer
+ // to the GOT in case we're generating PIC.
__asm__ volatile (
- "mov %%ebx, %%edi\n"
- "cpuid\n"
- "xchg %%edi, %%ebx\n"
+ "mov %%ebx, %%edi\n\t"
+ "cpuid\n\t"
+ "xchg %%edi, %%ebx\n\t"
: "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
: "a"(info_type)
);
-}
-
-#else // !V8_CC_MSVC || (!defined(__i386__) && !defined(__pic__))
-
-static V8_INLINE(void __cpuid(int cpu_info[4], int info_type)) {
+#else
__asm__ volatile (
"cpuid \n\t"
: "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
: "a"(info_type)
);
+#endif // defined(__i386__) && defined(__pic__)
}
-#endif
+#endif // !V8_CC_MSVC
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
@@ -443,4 +451,16 @@
#endif
}
+
+// static
+int CPU::NumberOfProcessorsOnline() {
+#if V8_OS_WIN
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+#else
+ return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
+#endif
+}
+
} } // namespace v8::internal
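
For orientation (illustrative only, not part of this patch): the new CPU::NumberOfProcessorsOnline() above is a thin wrapper over the platform query. A minimal standalone sketch of the same probe, assuming a Win32 or POSIX host — ProcessorsOnline() is a hypothetical helper name, not V8 API:

    #if defined(_WIN32)
    #include <windows.h>  // GetSystemInfo()
    #else
    #include <unistd.h>   // sysconf()
    #endif

    // Returns the number of logical processors currently online.
    static int ProcessorsOnline() {
    #if defined(_WIN32)
      SYSTEM_INFO info;
      GetSystemInfo(&info);
      return static_cast<int>(info.dwNumberOfProcessors);
    #else
      return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
    #endif
    }
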
diff --git a/src/cpu.h b/src/cpu.h
index ef58b4c..58c7f5d 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -99,6 +99,9 @@
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
+ // Returns the number of processors online.
+ static int NumberOfProcessorsOnline() V8_WARN_UNUSED_RESULT;
+
// Initializes the cpu architecture support. Called once at VM startup.
static void SetUp();
diff --git a/src/factory.cc b/src/factory.cc
index 9323c2f..6faa84e 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1249,6 +1249,21 @@
}
+Handle<OptimizedCodeEntry> Factory::NewOptimizedCodeEntry(
+ Handle<Context> native_context,
+ Handle<JSFunction> function,
+ Handle<Code> code,
+ Handle<FixedArray> literals) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateOptimizedCodeEntry(
+ *native_context,
+ *function,
+ *code,
+ *literals),
+ OptimizedCodeEntry);
+}
+
+
Handle<String> Factory::NumberToString(Handle<Object> number) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->NumberToString(*number), String);
diff --git a/src/factory.h b/src/factory.h
index 02c9a4d..390329c 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -487,6 +487,12 @@
Handle<ScopeInfo> scope_info);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
+ Handle<OptimizedCodeEntry> NewOptimizedCodeEntry(
+ Handle<Context> native_context,
+ Handle<JSFunction> function,
+ Handle<Code> code,
+ Handle<FixedArray> literals);
+
Handle<JSMessageObject> NewJSMessageObject(
Handle<String> type,
Handle<JSArray> arguments,
diff --git a/src/heap.cc b/src/heap.cc
index b1096b3..4fd478f 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2824,6 +2824,12 @@
}
set_shared_function_info_map(Map::cast(obj));
+ { MaybeObject* maybe_obj = AllocateMap(OPTIMIZED_CODE_ENTRY_TYPE,
+ OptimizedCodeEntry::kAlignedSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_optimized_code_entry_map(Map::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
JSMessageObject::kSize);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -3414,7 +3420,7 @@
// Flush the number to string cache.
int len = number_string_cache()->length();
for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(this, i);
+ number_string_cache()->set_undefined(i);
}
}
@@ -3651,6 +3657,30 @@
}
+MaybeObject* Heap::AllocateOptimizedCodeEntry(
+ Context* native_context,
+ JSFunction* function,
+ Code* code,
+ FixedArray* literals) {
+ OptimizedCodeEntry* entry;
+ MaybeObject* maybe = Allocate(optimized_code_entry_map(), OLD_POINTER_SPACE);
+ if (!maybe->To<OptimizedCodeEntry>(&entry)) return maybe;
+
+ // Set pointer fields.
+ entry->set_native_context(native_context);
+ entry->set_function(function);
+ entry->set_code(code);
+ entry->set_literals(literals);
+
+ // NULL-out link fields.
+ entry->set_next_by_shared_info(NULL, SKIP_WRITE_BARRIER);
+ entry->set_next_by_native_context(NULL, SKIP_WRITE_BARRIER);
+ entry->set_cacheable(false);
+
+ return entry;
+}
+
+
MaybeObject* Heap::AllocateJSMessageObject(String* type,
JSArray* arguments,
int start_position,
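
The allocator above follows the usual MaybeObject protocol of this era: on allocation failure it returns Failure::RetryAfterGC, which the caller must propagate. A sketch of the caller-side unwrap, mirroring the To<T>() pattern used inside AllocateOptimizedCodeEntry() itself (hypothetical call site, for illustration only):

    OptimizedCodeEntry* entry;
    MaybeObject* maybe = heap->AllocateOptimizedCodeEntry(
        native_context, function, code, literals);
    // To<T>() returns false on allocation failure; hand the failure
    // back up so the caller can trigger a GC and retry.
    if (!maybe->To<OptimizedCodeEntry>(&entry)) return maybe;
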
diff --git a/src/heap.h b/src/heap.h
index 5be7861..f0920b3 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -63,6 +63,7 @@
V(Map, cell_map, CellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
+ V(Map, optimized_code_entry_map, OptimizedCodeEntryMap) \
V(Map, meta_map, MetaMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, native_context_map, NativeContextMap) \
@@ -78,9 +79,9 @@
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
- /* The first 32 roots above this line should be boring from a GC point of */ \
- /* view. This means they are never in new space and never on a page that */ \
- /* is being compacted. */ \
+ /* The roots above this line should be boring from a GC point of view. */ \
+ /* This means they are never in new space and never on a page that is */ \
+ /* being compacted. */ \
V(FixedArray, number_string_cache, NumberStringCache) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
@@ -1097,6 +1098,16 @@
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateSharedFunctionInfo(Object* name);
+ // Allocates a new OptimizedCodeEntry object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateOptimizedCodeEntry(
+ Context* native_context,
+ JSFunction* function,
+ Code* code,
+ FixedArray* literals);
+
// Allocates a new JSMessageObject object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
diff --git a/src/hydrogen-environment-liveness.cc b/src/hydrogen-environment-liveness.cc
index 9efa47b..fad9755 100644
--- a/src/hydrogen-environment-liveness.cc
+++ b/src/hydrogen-environment-liveness.cc
@@ -163,11 +163,7 @@
live->Clear();
for (int i = 0; i < enter->return_targets()->length(); ++i) {
int return_id = enter->return_targets()->at(i)->block_id();
- // When an AbnormalExit is involved, it can happen that the return
- // target block doesn't actually exist.
- if (return_id < live_at_block_start_.length()) {
- live->Union(*live_at_block_start_[return_id]);
- }
+ live->Union(*live_at_block_start_[return_id]);
}
last_simulate_ = NULL;
break;
diff --git a/src/hydrogen-escape-analysis.cc b/src/hydrogen-escape-analysis.cc
index 311091b..145a779 100644
--- a/src/hydrogen-escape-analysis.cc
+++ b/src/hydrogen-escape-analysis.cc
@@ -86,7 +86,8 @@
// Create a new state full of phis for loop header entries.
HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
- HInstruction* previous, HCapturedObject* old_state) {
+ HInstruction* previous,
+ HCapturedObject* old_state) {
HBasicBlock* block = previous->block();
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
@@ -100,7 +101,8 @@
// Create a new state by copying an existing one.
HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
- HInstruction* previous, HCapturedObject* old_state) {
+ HInstruction* previous,
+ HCapturedObject* old_state) {
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
HValue* operand = old_state->OperandAt(index);
@@ -112,8 +114,9 @@
// Insert a newly created phi into the given block and fill all incoming
// edges with the given value.
-HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(
- HBasicBlock* block, HValue* incoming_value, int index) {
+HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(HBasicBlock* block,
+ HValue* incoming_value,
+ int index) {
Zone* zone = graph()->zone();
HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone);
for (int i = 0; i < block->predecessors()->length(); i++) {
@@ -124,6 +127,21 @@
}
+// Insert a newly created value check as a replacement for map checks.
+HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
+ HCheckMaps* mapcheck) {
+ Zone* zone = graph()->zone();
+ HValue* value = state->map_value();
+ // TODO(mstarzinger): This will narrow a map check against a set of maps
+ // down to the first element in the set. Revisit and fix this.
+ Handle<Map> map_object = mapcheck->map_set()->first();
+ UniqueValueId map_id = mapcheck->map_unique_ids()->first();
+ HCheckValue* check = HCheckValue::New(zone, NULL, value, map_object, map_id);
+ check->InsertBefore(mapcheck);
+ return check;
+}
+
+
// Performs a forward data-flow analysis of all loads and stores on the
// given captured allocation. This uses a reverse post-order iteration
// over affected basic blocks. All non-escaping instructions are handled
@@ -172,12 +190,15 @@
int index = store->access().offset() / kPointerSize;
if (store->object() != allocate) continue;
ASSERT(store->access().IsInobject());
- state = NewStateCopy(store, state);
+ state = NewStateCopy(store->previous(), state);
state->SetOperandAt(index, store->value());
if (store->has_transition()) {
state->SetOperandAt(0, store->transition());
}
- store->DeleteAndReplaceWith(NULL);
+ if (store->HasObservableSideEffects()) {
+ state->ReuseSideEffectsFromStore(store);
+ }
+ store->DeleteAndReplaceWith(store->ActualValue());
if (FLAG_trace_escape_analysis) {
PrintF("Replacing store #%d%s\n", instr->id(),
store->has_transition() ? " (with transition)" : "");
@@ -196,15 +217,13 @@
case HValue::kCheckHeapObject: {
HCheckHeapObject* check = HCheckHeapObject::cast(instr);
if (check->value() != allocate) continue;
- check->DeleteAndReplaceWith(NULL);
+ check->DeleteAndReplaceWith(check->ActualValue());
break;
}
case HValue::kCheckMaps: {
HCheckMaps* mapcheck = HCheckMaps::cast(instr);
if (mapcheck->value() != allocate) continue;
- // TODO(mstarzinger): This approach breaks if the tracked map value
- // is not a HConstant. Find a repro test case and fix this.
- ASSERT(mapcheck->ActualValue() == allocate);
+ NewMapCheckAndInsert(state, mapcheck);
mapcheck->DeleteAndReplaceWith(mapcheck->ActualValue());
break;
}
diff --git a/src/hydrogen-escape-analysis.h b/src/hydrogen-escape-analysis.h
index 2d425e2..9db46cb 100644
--- a/src/hydrogen-escape-analysis.h
+++ b/src/hydrogen-escape-analysis.h
@@ -63,6 +63,8 @@
HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index);
+ HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck);
+
HCapturedObject* StateAt(HBasicBlock* block) {
return block_states_.at(block->block_id());
}
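
Taken together, the escape-analysis hunks above amount to one replay step per non-escaping store into the tracked allocation; distilled into a conceptual sketch (names as in hydrogen-escape-analysis.cc, transition handling and tracing omitted):

    // Replay a store into the captured state instead of emitting it.
    state = NewStateCopy(store->previous(), state);  // fork state before the store
    state->SetOperandAt(index, store->value());      // remember the stored value
    if (store->HasObservableSideEffects()) {
      state->ReuseSideEffectsFromStore(store);       // carry over the store's GVN flags
    }
    store->DeleteAndReplaceWith(store->ActualValue());  // the real store disappears
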
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index a9adfc1..f6ee8dc 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1451,15 +1451,16 @@
}
-void HCheckFunction::PrintDataTo(StringStream* stream) {
+void HCheckValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" %p", *target());
+ stream->Add(" ");
+ object()->ShortPrint(stream);
}
-HValue* HCheckFunction::Canonicalize() {
+HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->UniqueValueIdsMatch(target_unique_id_))
+ HConstant::cast(value())->UniqueValueIdsMatch(object_unique_id_))
? NULL
: this;
}
@@ -4052,7 +4053,7 @@
}
-void HCheckFunction::Verify() {
+void HCheckValue::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 8f79edf..dd42190 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -63,7 +63,6 @@
#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
- V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
V(Allocate) \
@@ -88,12 +87,12 @@
V(CallStub) \
V(CapturedObject) \
V(Change) \
- V(CheckFunction) \
V(CheckHeapObject) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
@@ -1443,16 +1442,6 @@
};
-class HAbnormalExit V8_FINAL : public HTemplateControlInstruction<0, 0> {
- public:
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
-};
-
-
class HUnaryOperation : public HTemplateInstruction<1> {
public:
HUnaryOperation(HValue* value, HType type = HType::Tagged())
@@ -2562,6 +2551,7 @@
HValue* value() { return OperandAt(0); }
SmallMapList* map_set() { return &map_set_; }
+ ZoneList<UniqueValueId>* map_unique_ids() { return &map_unique_ids_; }
bool has_migration_target() {
return has_migration_target_;
@@ -2630,9 +2620,20 @@
};
-class HCheckFunction V8_FINAL : public HUnaryOperation {
+class HCheckValue V8_FINAL : public HUnaryOperation {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCheckFunction, HValue*, Handle<JSFunction>);
+ static HCheckValue* New(Zone* zone, HValue* context,
+ HValue* value, Handle<JSFunction> target) {
+ bool in_new_space = Isolate::Current()->heap()->InNewSpace(*target);
+ HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
+ return check;
+ }
+ static HCheckValue* New(Zone* zone, HValue* context,
+ HValue* value, Handle<Map> map, UniqueValueId id) {
+ HCheckValue* check = new(zone) HCheckValue(value, map, false);
+ check->object_unique_id_ = id;
+ return check;
+ }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -2646,32 +2647,31 @@
#endif
virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- target_unique_id_ = UniqueValueId(target_);
+ object_unique_id_ = UniqueValueId(object_);
}
- Handle<JSFunction> target() const { return target_; }
- bool target_in_new_space() const { return target_in_new_space_; }
+ Handle<HeapObject> object() const { return object_; }
+ bool object_in_new_space() const { return object_in_new_space_; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- HCheckFunction* b = HCheckFunction::cast(other);
- return target_unique_id_ == b->target_unique_id_;
+ HCheckValue* b = HCheckValue::cast(other);
+ return object_unique_id_ == b->object_unique_id_;
}
private:
- HCheckFunction(HValue* value, Handle<JSFunction> function)
+ HCheckValue(HValue* value, Handle<HeapObject> object, bool in_new_space)
: HUnaryOperation(value, value->type()),
- target_(function), target_unique_id_() {
+ object_(object), object_in_new_space_(in_new_space) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- target_in_new_space_ = Isolate::Current()->heap()->InNewSpace(*function);
}
- Handle<JSFunction> target_;
- UniqueValueId target_unique_id_;
- bool target_in_new_space_;
+ Handle<HeapObject> object_;
+ UniqueValueId object_unique_id_;
+ bool object_in_new_space_;
};
@@ -3230,6 +3230,15 @@
int length() const { return values_.length(); }
int capture_id() const { return capture_id_; }
+ // Shortcut for the map value of this captured object.
+ HValue* map_value() const { return values()->first(); }
+
+ void ReuseSideEffectsFromStore(HInstruction* store) {
+ ASSERT(store->HasObservableSideEffects());
+ ASSERT(store->IsStoreNamedField());
+ gvn_flags_.Add(store->gvn_flags());
+ }
+
// Replay effects of this instruction on the given environment.
void ReplayEnvironment(HEnvironment* env);
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 4847f1a..b19ca8f 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -5406,8 +5406,6 @@
HThrow* instr = Add<HThrow>(value);
instr->set_position(expr->position());
Add<HSimulate>(expr->id());
- current_block()->FinishExit(new(zone()) HAbnormalExit);
- set_current_block(NULL);
}
@@ -7110,7 +7108,7 @@
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Pop();
- Add<HCheckFunction>(function, expr->target());
+ Add<HCheckValue>(function, expr->target());
// Replace the global object with the global receiver.
HGlobalReceiver* global_receiver = Add<HGlobalReceiver>(global_object);
@@ -7162,7 +7160,7 @@
HGlobalReceiver* receiver = New<HGlobalReceiver>(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- Add<HCheckFunction>(function, expr->target());
+ Add<HCheckValue>(function, expr->target());
if (TryInlineBuiltinFunctionCall(expr, true)) { // Drop the function.
if (FLAG_trace_inlining) {
@@ -7225,7 +7223,7 @@
HValue* function = Top();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<JSFunction> constructor = expr->target();
- HValue* check = Add<HCheckFunction>(function, constructor);
+ HValue* check = Add<HCheckValue>(function, constructor);
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
@@ -7317,7 +7315,7 @@
HBinaryCall* call;
if (expr->target().is_identical_to(array_function)) {
Handle<Cell> cell = expr->allocation_info_cell();
- Add<HCheckFunction>(constructor, array_function);
+ Add<HCheckValue>(constructor, array_function);
call = new(zone()) HCallNewArray(context, constructor, argument_count,
cell, expr->elements_kind());
} else {
@@ -8149,7 +8147,7 @@
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
- Add<HCheckFunction>(right, target);
+ Add<HCheckValue>(right, target);
HInstanceOfKnownGlobal* result =
new(zone()) HInstanceOfKnownGlobal(context, left, target);
result->set_position(expr->position());
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index ccd623c..e7e0327 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -5800,15 +5800,15 @@
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (instr->hydrogen()->target_in_new_space()) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Handle<HeapObject> object = instr->hydrogen()->object();
+ if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ cmp(reg, Operand::ForCell(cell));
} else {
Operand operand = ToOperand(instr->value());
- __ cmp(operand, target);
+ __ cmp(operand, object);
}
DeoptimizeIf(not_equal, instr->environment());
}
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 7ec1258..e6cc719 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1886,13 +1886,6 @@
}
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
@@ -2060,14 +2053,14 @@
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- // If the target is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the target gets promoted before we
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ // If the object is in new space, we'll emit a global cell compare and so
+ // want the value in a register. If the object gets promoted before we
// emit code, we will still get the register but will do an immediate
// compare instead of the cell compare. This is safe.
- LOperand* value = instr->target_in_new_space()
+ LOperand* value = instr->object_in_new_space()
? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index e8a3baf..e795926 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -62,12 +62,12 @@
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
@@ -2437,16 +2437,16 @@
};
-class LCheckFunction V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index 9fb16fb..ddb1580 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -39,9 +39,6 @@
SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
if (isolate->context() != NULL) {
context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_ = Handle<Context>(isolate->context());
-#endif
}
isolate->set_save_context(this);
diff --git a/src/isolate.cc b/src/isolate.cc
index 774ad5c..54329b7 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -137,7 +137,7 @@
int SystemThreadManager::NumberOfParallelSystemThreads(
ParallelSystemComponent type) {
- int number_of_threads = Min(OS::NumberOfCores(), kMaxThreads);
+ int number_of_threads = Min(CPU::NumberOfProcessorsOnline(), kMaxThreads);
ASSERT(number_of_threads > 0);
if (number_of_threads == 1) {
return 0;
diff --git a/src/isolate.h b/src/isolate.h
index 3d42cad..751f995 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1406,9 +1406,6 @@
private:
Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
SaveContext* prev_;
Address c_entry_fp_;
};
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index f065da1..4eb51b5 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2590,7 +2590,7 @@
for (int i = new_number_of_transitions * step;
i < number_of_transitions * step;
i++) {
- prototype_transitions->set_undefined(heap_, header + i);
+ prototype_transitions->set_undefined(header + i);
}
}
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 2c42001..710901e 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -521,290 +521,6 @@
}
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- __ sra(scratch1, a0, kSmiTagSize);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ sra(scratch1, a1, kSmiTagSize);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- if (destination == kCoreRegisters) {
- __ Move(a2, a3, f14);
- __ Move(a0, a1, f12);
- }
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (destination == kFPURegisters) {
- // Load the double from tagged HeapNumber to double register.
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
- // point in generating even more instructions.
- __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ lw(dst2, FieldMemOperand(object,
- HeapNumber::kValueOffset + kPointerSize));
- }
- __ Branch(&done);
-
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- // Convert smi to double using FPU instructions.
- __ mtc1(scratch1, dst);
- __ cvt_d_w(dst, dst);
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ Move(dst1, dst2, dst);
- }
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
- Register scratch2,
- FPURegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
-
- __ mtc1(int_scratch, single_scratch);
- __ cvt_d_w(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DoubleRegister double_dst,
- DoubleRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
- __ Branch(&done);
-
- __ bind(&obj_is_not_smi);
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- // Load the double value.
- __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- scratch1,
- double_dst,
- at,
- double_scratch,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DoubleRegister double_scratch0,
- DoubleRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done, maybe_undefined;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
-
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- // Load the double value.
- __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- __ Branch(&done);
-
- __ bind(&maybe_undefined);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(not_int32, ne, object, Operand(at));
- // |undefined| is truncated to 0.
- __ li(dst, Operand(Smi::FromInt(0)));
- // Fall through.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- if (!IsMipsSoftFloatABI) {
- // We are not using MIPS FPU instructions, and parameters for the runtime
- // function call are prepaired in a0-a3 registers, but function we are
- // calling is compiled with hard-float flag and expecting hard float ABI
- // (parameters in f12/f14 registers). We need to copy parameters from
- // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- if (!IsMipsSoftFloatABI) {
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- // Double returned in registers v0 and v1.
- __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
- __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
- }
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-}
-
-
bool WriteInt32ToHeapNumberStub::IsPregenerated() {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(a1) &&
@@ -1472,6 +1188,42 @@
}
+// Generates code to call a C function to do a double operation.
+// This code never falls through, but returns with a heap number containing
+// the result in v0.
+// Register heap_number_result must be a heap number in which the
+// result of the operation will be stored.
+// Requires the following layout on entry:
+// a0: Left value (least significant part of mantissa).
+// a1: Left value (sign, exponent, top of mantissa).
+// a2: Right value (least significant part of mantissa).
+// a3: Right value (sign, exponent, top of mantissa).
+static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Assert that heap_number_result is saved.
+ // We currently always use s0 to pass it.
+ ASSERT(heap_number_result.is(s0));
+
+ // Push the current return address before the C call.
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
+ // Store answer in the overwritable heap number.
+ // Double returned in register f0.
+ __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ // Place heap_number_result in v0 and return to the pushed return address.
+ __ pop(ra);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, heap_number_result);
+}
+
+
void BinaryOpStub::Initialize() {
platform_specific_bit_ = true; // FPU is a base requirement for V8.
}
@@ -1699,49 +1451,41 @@
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
- // depending on operation.
- FloatingPointHelper::Destination destination =
- op != Token::MOD ?
- FloatingPointHelper::kFPURegisters :
- FloatingPointHelper::kCoreRegisters;
-
// Allocate new heap number for result.
Register result = s0;
BinaryOpStub_GenerateHeapResultAllocation(
masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
- // Load the operands.
+ // Load left and right operands into f12 and f14.
if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ __ SmiUntag(scratch1, a0);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ SmiUntag(scratch1, a1);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
} else {
- // Load right operand to f14 or a2/a3.
+ // Load right operand to f14.
if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, f14, f16, a2, a3, heap_number_map,
- scratch1, scratch2, f2, miss);
+ __ LoadNumberAsInt32Double(
+ right, f14, heap_number_map, scratch1, scratch2, f2, miss);
} else {
Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, f14, a2, a3, heap_number_map,
- scratch1, scratch2, fail);
+ __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
}
// Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
// jumps to |miss|.
if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, f12, f16, a0, a1, heap_number_map,
- scratch1, scratch2, f2, miss);
+ __ LoadNumberAsInt32Double(
+ left, f12, heap_number_map, scratch1, scratch2, f2, miss);
} else {
Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, f12, a0, a1, heap_number_map,
- scratch1, scratch2, fail);
+ __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
}
}
// Calculate the result.
- if (destination == FloatingPointHelper::kFPURegisters) {
+ if (op != Token::MOD) {
// Using FPU registers:
// f12: Left value.
// f14: Right value.
@@ -1770,10 +1514,7 @@
__ mov(v0, result);
} else {
// Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
- result,
- scratch1);
+ CallCCodeForDoubleOperation(masm, op, result, scratch1);
if (FLAG_debug_code) {
__ stop("Unreachable code.");
}
@@ -1791,24 +1532,12 @@
__ SmiUntag(a2, right);
} else {
// Convert operands to 32-bit integers. Right in a2 and left in a3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
+ __ ConvertNumberToInt32(
+ left, a3, heap_number_map,
+ scratch1, scratch2, scratch3, f0, not_numbers);
+ __ ConvertNumberToInt32(
+ right, a2, heap_number_map,
+ scratch1, scratch2, scratch3, f0, not_numbers);
}
Label result_not_a_smi;
switch (op) {
@@ -2042,36 +1771,13 @@
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination = (op_ != Token::MOD)
- ? FloatingPointHelper::kFPURegisters
- : FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- f14,
- f16,
- a2,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- f12,
- f16,
- t0,
- t1,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
+ __ LoadNumberAsInt32Double(
+ right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
+ __ LoadNumberAsInt32Double(
+ left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
- if (destination == FloatingPointHelper::kFPURegisters) {
+ if (op_ != Token::MOD) {
Label return_heap_number;
switch (op_) {
case Token::ADD:
@@ -2148,10 +1854,6 @@
__ BranchF(&transition, NULL, ne, f14, f16);
}
- // We preserved a0 and a1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(t1, t0);
-
Label pop_and_call_runtime;
// Allocate a heap number to store the result.
@@ -2164,12 +1866,8 @@
&pop_and_call_runtime,
mode_);
- // Load the left value from the value saved on the stack.
- __ Pop(a1, a0);
-
// Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
+ CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
if (FLAG_debug_code) {
__ stop("Unreachable code.");
}
@@ -2189,30 +1887,13 @@
case Token::SHR:
case Token::SHL: {
Label return_heap_number;
- Register scratch3 = t1;
// Convert operands to 32-bit integers. Right in a2 and left in a3. The
// registers a0 and a1 (right and left) are preserved for the runtime
// call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- f2,
- &transition);
+ __ LoadNumberAsInt32(
+ left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
+ __ LoadNumberAsInt32(
+ right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
// The ECMA-262 standard specifies that, for shift operations, only the
// 5 least significant bits of the shift value should be used.
@@ -7097,10 +6778,7 @@
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3,
- // Overwrites all regs after this.
- t1, t2, t3, t5, a2,
- &slow_elements);
+ __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 1ae1d34..221b344 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -527,119 +527,6 @@
bool NeedsImmovableCode() { return true; }
};
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kFPURegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- FPURegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- FPURegister double_dst,
- FPURegister double_scratch,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when FPU is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when FPU is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in v0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- // Loads the objects from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU
- // must be supported. If kCoreRegisters are requested and FPU is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index eb730bb..847dea0 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1238,7 +1238,6 @@
a3, // Scratch regs...
t0,
t1,
- t2,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 80794c8..d2ab06a 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -5116,20 +5116,20 @@
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
+ Handle<HeapObject> object = instr->hydrogen()->object();
AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*target)) {
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr->environment(), reg,
Operand(at));
} else {
DeoptimizeIf(ne, instr->environment(), reg,
- Operand(target));
+ Operand(object));
}
}
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index b11c9c2..b8775c3 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1791,13 +1791,6 @@
}
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), a0);
return MarkAsCall(new(zone()) LThrow(value), instr);
@@ -1951,9 +1944,9 @@
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 6498943..dfdf63c 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -62,12 +62,12 @@
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
@@ -2321,16 +2321,16 @@
};
-class LCheckFunction V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index ace7323..2738900 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3266,7 +3266,7 @@
tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (tagging_mode == TAG_RESULT) {
sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
} else {
@@ -3428,7 +3428,6 @@
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
Label* fail,
int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
@@ -3485,25 +3484,11 @@
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
- FloatingPointHelper::Destination destination;
- destination = FloatingPointHelper::kFPURegisters;
-
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- sdc1(f0, MemOperand(scratch1, 0));
- } else {
- sw(mantissa_reg, MemOperand(scratch1, 0));
- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
+ mtc1(untagged_value, f2);
+ cvt_d_w(f0, f2);
+ sdc1(f0, MemOperand(scratch1, 0));
bind(&done);
}
@@ -4400,15 +4385,6 @@
}
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(at, index);
- Check(eq, kRegisterDidNotMatchExpectedRoot, reg, Operand(at));
- }
-}
-
-
void MacroAssembler::AssertFastElements(Register elements) {
if (emit_debug_code()) {
ASSERT(!elements.is(at));
@@ -4601,6 +4577,150 @@
}
+void MacroAssembler::ConvertNumberToInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_number) {
+ Label done;
+ Label not_in_int32_range;
+
+ UntagAndJumpIfSmi(dst, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+ ConvertToInt32(object,
+ dst,
+ scratch1,
+ scratch2,
+ double_scratch,
+ &not_in_int32_range);
+ jmp(&done);
+
+ bind(&not_in_int32_range);
+ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ EmitOutOfInt32RangeTruncate(dst,
+ scratch1,
+ scratch2,
+ scratch3);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label is_smi, done;
+
+ UntagAndJumpIfSmi(scratch, object, &is_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+
+ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Branch(&done);
+
+ bind(&is_smi);
+ mtc1(scratch, dst);
+ cvt_d_w(dst, dst);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ UntagAndJumpIfNotSmi(scratch1, object, &obj_is_not_smi);
+ mtc1(scratch1, double_scratch);
+ cvt_d_w(double_dst, double_scratch);
+ Branch(&done);
+
+ bind(&obj_is_not_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ // Load the double value.
+ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ EmitFPUTruncate(kRoundToZero,
+ scratch1,
+ double_dst,
+ at,
+ double_scratch,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+
+ Label done, maybe_undefined;
+
+ UntagAndJumpIfSmi(dst, object, &done);
+
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ // Load the double value.
+ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ EmitFPUTruncate(kRoundToZero,
+ dst,
+ double_scratch0,
+ scratch1,
+ double_scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ Branch(&done);
+
+ bind(&maybe_undefined);
+ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ Branch(not_int32, ne, object, Operand(at));
+ // |undefined| is truncated to 0.
+ li(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
+
+ bind(&done);
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
@@ -4920,13 +5040,11 @@
}
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- BailoutReason reason) {
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Check(eq, reason, src, Operand(at));
+ ASSERT(!reg.is(at));
+ LoadRoot(at, index);
+ Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
}
}
@@ -4936,7 +5054,7 @@
Register scratch,
Label* on_not_heap_number) {
lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 9605038..26bed68 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -797,6 +797,54 @@
Register scratch2,
Register scratch3);
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMA-262 section 9.5: the value is truncated
+ // and brought into the range -2^31 .. 2^31 - 1. (A standalone sketch of
+ // this rule follows these declarations.)
+ void ConvertNumberToInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_number);
+
+ // Loads the number from |object| into the |dst| register.
+ // If |object| is neither a smi nor a heap number, control jumps to
+ // |not_number| with |object| left intact.
+ void LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number);
+
+ // Loads the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating-point values in the 32-bit integer range that are not exact
+ // integers won't be loaded.
+ void LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer. Floating-point values in the 32-bit integer range
+ // that are not exact integers won't be converted.
+ void LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32);
+
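As a minimal standalone sketch of the conversion these declarations describe (not V8 code; the helper name ToInt32 is assumed here for illustration), the ECMA-262 section 9.5 rule truncates toward zero, wraps modulo 2^32, and brings the result into [-2^31, 2^31 - 1]:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Truncate toward zero, wrap modulo 2^32, then shift into [-2^31, 2^31 - 1].
static int32_t ToInt32(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;  // NaN/Inf become 0.
  double wrapped = std::fmod(std::trunc(value), 4294967296.0);
  if (wrapped < 0) wrapped += 4294967296.0;              // Into [0, 2^32).
  if (wrapped >= 2147483648.0) wrapped -= 4294967296.0;  // Into [-2^31, 2^31).
  return static_cast<int32_t>(wrapped);
}

int main() {
  std::printf("%d\n", ToInt32(3.9));           // 3
  std::printf("%d\n", ToInt32(-3.9));          // -3
  std::printf("%d\n", ToInt32(4294967296.0));  // 0 (wraps modulo 2^32)
  std::printf("%d\n", ToInt32(2147483648.0));  // -2147483648
  return 0;
}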
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
@@ -986,16 +1034,13 @@
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
+ // the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- // All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
Label* fail,
int elements_offset = 0);
@@ -1280,7 +1325,6 @@
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
@@ -1367,11 +1411,9 @@
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
- // Abort execution if argument is not the root value with the given index,
+ // Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- BailoutReason reason);
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// HeapNumber utilities.
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index ccbe86a..18353b6 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1775,7 +1775,7 @@
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
- t0, v0, elements, a3, t1, a2, t5,
+ t0, v0, elements, a3, t1, a2,
&call_builtin, argc * kDoubleSize);
// Save new length.
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index a61eacf..4ba70c3 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -201,6 +201,9 @@
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
break;
+ case OPTIMIZED_CODE_ENTRY_TYPE:
+ OptimizedCodeEntry::cast(this)->OptimizedCodeEntryVerify();
+ break;
case JS_MESSAGE_OBJECT_TYPE:
JSMessageObject::cast(this)->JSMessageObjectVerify();
break;
@@ -577,6 +580,17 @@
}
+void OptimizedCodeEntry::OptimizedCodeEntryVerify() {
+ CHECK(IsOptimizedCodeEntry());
+ VerifyObjectField(kNativeContextOffset);
+ VerifyObjectField(kFunctionOffset);
+ VerifyObjectField(kCodeOffset);
+ VerifyObjectField(kLiteralsOffset);
+ VerifyObjectField(kNextBySharedInfoOffset);
+ VerifyObjectField(kNextByNativeContextOffset);
+}
+
+
void JSGlobalProxy::JSGlobalProxyVerify() {
CHECK(IsJSGlobalProxy());
JSObjectVerify();
diff --git a/src/objects-inl.h b/src/objects-inl.h
index b165ad4..77b4e38 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -675,6 +675,7 @@
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(OptimizedCodeEntry, OPTIMIZED_CODE_ENTRY_TYPE)
TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
@@ -2092,28 +2093,21 @@
void FixedArray::set_undefined(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- set_undefined(GetHeap(), index);
-}
-
-
-void FixedArray::set_undefined(Heap* heap, int index) {
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->undefined_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
- heap->undefined_value());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->undefined_value());
}
void FixedArray::set_null(int index) {
- set_null(GetHeap(), index);
-}
-
-
-void FixedArray::set_null(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->null_value());
}
@@ -2579,6 +2573,7 @@
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(OptimizedCodeEntry)
CAST_ACCESSOR(Map)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(GlobalObject)
@@ -4931,6 +4926,52 @@
}
+ACCESSORS(OptimizedCodeEntry, native_context, Context, kNativeContextOffset)
+ACCESSORS(OptimizedCodeEntry, function, JSFunction, kFunctionOffset)
+ACCESSORS(OptimizedCodeEntry, code, Code, kCodeOffset)
+ACCESSORS(OptimizedCodeEntry, literals, FixedArray, kLiteralsOffset)
+
+
+OptimizedCodeEntry* OptimizedCodeEntry::next_by_shared_info() {
+ Object* object = READ_FIELD(this, kNextBySharedInfoOffset);
+ if (object == NULL) return NULL;
+ return OptimizedCodeEntry::cast(object);
+}
+
+
+OptimizedCodeEntry* OptimizedCodeEntry::next_by_native_context() {
+ Object* object = READ_FIELD(this, kNextByNativeContextOffset);
+ if (object == NULL) return NULL;
+ return OptimizedCodeEntry::cast(object);
+}
+
+
+void OptimizedCodeEntry::set_next_by_shared_info(OptimizedCodeEntry* value,
+ WriteBarrierMode mode) {
+ WRITE_FIELD(this, kNextBySharedInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(
+ GetHeap(), this, kNextBySharedInfoOffset, value, mode);
+}
+
+
+void OptimizedCodeEntry::set_next_by_native_context(OptimizedCodeEntry* value,
+ WriteBarrierMode mode) {
+ WRITE_FIELD(this, kNextByNativeContextOffset, value);
+ CONDITIONAL_WRITE_BARRIER(
+ GetHeap(), this, kNextByNativeContextOffset, value, mode);
+}
+
+
+bool OptimizedCodeEntry::cacheable() {
+ return static_cast<bool>(READ_BYTE_FIELD(this, kCacheableOffset));
+}
+
+
+void OptimizedCodeEntry::set_cacheable(bool val) {
+ WRITE_BYTE_FIELD(this, kCacheableOffset, static_cast<byte>(val));
+}
+
+
bool JSFunction::IsBuiltin() {
return context()->global_object()->IsJSBuiltinsObject();
}
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 9ea060f..dd6cd2c 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -195,6 +195,9 @@
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
break;
+ case OPTIMIZED_CODE_ENTRY_TYPE:
+ OptimizedCodeEntry::cast(this)->OptimizedCodeEntryPrint(out);
+ break;
case JS_MESSAGE_OBJECT_TYPE:
JSMessageObject::cast(this)->JSMessageObjectPrint(out);
break;
@@ -890,6 +893,25 @@
}
+void OptimizedCodeEntry::OptimizedCodeEntryPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "OptimizedCodeEntry");
+ PrintF(out, "\n - native_context = ");
+ native_context()->ShortPrint(out);
+ PrintF(out, "\n - function = ");
+ function()->ShortPrint(out);
+ PrintF(out, "\n - code = ");
+ code()->ShortPrint(out);
+ PrintF(out, "\n - literals = ");
+ literals()->ShortPrint(out);
+ PrintF(out, "\n - next_by_shared_info = ");
+ next_by_shared_info()->ShortPrint(out);
+ PrintF(out, "\n - next_by_native_context = ");
+ next_by_native_context()->ShortPrint(out);
+ PrintF(out, "\n - cacheable = %s", cacheable() ? "true" : "false");
+ PrintF(out, "\n");
+}
+
+
void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) {
PrintF(out, "global_proxy ");
JSObjectPrint(out);
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index cd46013..ece8b35 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -119,6 +119,9 @@
case SHARED_FUNCTION_INFO_TYPE:
return kVisitSharedFunctionInfo;
+ case OPTIMIZED_CODE_ENTRY_TYPE:
+ return kVisitOptimizedCodeEntry;
+
case JS_PROXY_TYPE:
return GetVisitorIdForSize(kVisitStruct,
kVisitStructGeneric,
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 2175737..44ad8e9 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -92,6 +92,7 @@
V(Cell) \
V(PropertyCell) \
V(SharedFunctionInfo) \
+ V(OptimizedCodeEntry) \
V(JSFunction) \
V(JSWeakMap) \
V(JSWeakSet) \
diff --git a/src/objects.cc b/src/objects.cc
index 15cee1c..452c1d6 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1786,6 +1786,10 @@
SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
break;
}
+ case OPTIMIZED_CODE_ENTRY_TYPE: {
+ OptimizedCodeEntry::BodyDescriptor::IterateBody(this, v);
+ break;
+ }
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
@@ -1958,46 +1962,17 @@
FieldDescriptor new_field(name, index, attributes, representation);
- ASSERT(index < map()->inobject_properties() ||
- (index - map()->inobject_properties()) < properties()->length() ||
- map()->unused_property_fields() == 0);
-
- FixedArray* values = NULL;
-
- // TODO(verwaest): Merge with AddFastPropertyUsingMap.
- if (map()->unused_property_fields() == 0) {
- // Make room for the new value
- MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_values->To(&values)) return maybe_values;
- }
-
- Heap* heap = isolate->heap();
-
- Object* storage;
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, representation);
- if (!maybe_storage->To(&storage)) return maybe_storage;
-
- // Note that Map::CopyAddDescriptor has side-effects, the new map is already
- // inserted in the transition tree. No more allocations that might fail are
- // allowed after this point.
Map* new_map;
MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- if (map()->unused_property_fields() == 0) {
- ASSERT(values != NULL);
- set_properties(values);
- new_map->set_unused_property_fields(kFieldsAdded - 1);
- } else {
- new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
+ int unused_property_fields = map()->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += kFieldsAdded;
}
+ new_map->set_unused_property_fields(unused_property_fields);
- set_map(new_map);
-
- FastPropertyAtPut(index, storage);
- return value;
+ return AddFastPropertyUsingMap(new_map, name, value, index, representation);
}
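The wrap-around expression above replaces the two explicit branches of the old code. A minimal standalone check (plain C++, not V8 code; kFieldsAdded = 3 is an assumption for illustration) that the single expression reproduces both former branches:

#include <cassert>

// New map's unused slot count: consume one slot; if none were free, the
// properties backing store grows by fields_added slots first.
static int NewUnusedPropertyFields(int old_unused, int fields_added) {
  int unused = old_unused - 1;
  if (unused < 0) unused += fields_added;
  return unused;
}

int main() {
  const int kFieldsAdded = 3;  // Assumed value, for illustration only.
  // Former branch 1: no free slots, the store grows, kFieldsAdded - 1 remain.
  assert(NewUnusedPropertyFields(0, kFieldsAdded) == kFieldsAdded - 1);
  // Former branch 2: one existing free slot is consumed.
  assert(NewUnusedPropertyFields(2, kFieldsAdded) == 1);
  return 0;
}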
@@ -4093,6 +4068,11 @@
extensibility_check);
}
+ if (lookup.IsFound() &&
+ (lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) {
+ LocalLookupRealNamedProperty(name_raw, &lookup);
+ }
+
// Check for accessor in prototype chain removed here in clone.
if (!lookup.IsFound()) {
// Neither properties nor transitions found.
@@ -4134,31 +4114,14 @@
}
break;
case CALLBACKS:
- // Callbacks are not guaranteed to be installed on the receiver. Also
- // perform a local lookup again. Fall through.
- case INTERCEPTOR:
- self->LocalLookupRealNamedProperty(*name, &lookup);
- if (lookup.IsFound()) {
- if (lookup.IsPropertyCallbacks()) {
- result = ConvertAndSetLocalProperty(
- &lookup, *name, *value, attributes);
- } else if (lookup.IsNormal()) {
- result = self->ReplaceSlowProperty(*name, *value, attributes);
- } else {
- result = SetPropertyToFieldWithAttributes(
- &lookup, name, value, attributes);
- }
- } else {
- result = self->AddProperty(
- *name, *value, attributes, kNonStrictMode, MAY_BE_STORE_FROM_KEYED,
- extensibility_check, value_type, mode);
- }
+ result = ConvertAndSetLocalProperty(&lookup, *name, *value, attributes);
break;
case TRANSITION:
result = SetPropertyUsingTransition(&lookup, name, value, attributes);
break;
- case HANDLER:
case NONEXISTENT:
+ case HANDLER:
+ case INTERCEPTOR:
UNREACHABLE();
}
@@ -9471,6 +9434,15 @@
}
+void OptimizedCodeEntry::Kill() {
+ set_function(NULL, SKIP_WRITE_BARRIER);
+ set_code(NULL, SKIP_WRITE_BARRIER);
+ set_native_context(NULL, SKIP_WRITE_BARRIER);
+ set_literals(NULL, SKIP_WRITE_BARRIER);
+ set_cacheable(false);
+}
+
+
bool JSFunction::CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
diff --git a/src/objects.h b/src/objects.h
index 988f1ab..040664d 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -131,6 +131,7 @@
// - Oddball
// - Foreign
// - SharedFunctionInfo
+// - OptimizedCodeEntry
// - Struct
// - Box
// - DeclaredAccessorDescriptor
@@ -406,6 +407,7 @@
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
+ V(OPTIMIZED_CODE_ENTRY_TYPE) \
\
V(JS_MESSAGE_OBJECT_TYPE) \
\
@@ -756,6 +758,7 @@
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ OPTIMIZED_CODE_ENTRY_TYPE,
JS_MESSAGE_OBJECT_TYPE,
@@ -1017,6 +1020,7 @@
V(Code) \
V(Oddball) \
V(SharedFunctionInfo) \
+ V(OptimizedCodeEntry) \
V(JSValue) \
V(JSDate) \
V(JSMessageObject) \
@@ -2913,11 +2917,7 @@
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
- // TODO(isolates): duplicate.
- inline void set_undefined(Heap* heap, int index);
inline void set_null(int index);
- // TODO(isolates): duplicate.
- inline void set_null(Heap* heap, int index);
inline void set_the_hole(int index);
inline Object** GetFirstElementAddress();
@@ -6841,6 +6841,76 @@
};
+// An optimized code entry represents an association between the native
+// context, a function, optimized code, and the literals. The entries
+// are linked into two lists for efficient lookup: by native context
+// (linked through next_by_native_context), or by shared function
+// info (linked through next_by_shared_info).
+// The references to the native context, function, and code are weak,
+// in order not to leak native contexts or functions through
+// SharedFunctionInfo. This means an entry can become "dead" through GC.
+ // Entries are removed lazily as each list is traversed. (A sketch of this
+ // lazy pruning follows the class definition.)
+class OptimizedCodeEntry: public HeapObject {
+ public:
+ // [native_context]: The native context of this entry. (WEAK)
+ DECL_ACCESSORS(native_context, Context)
+
+ // [function]: The JSFunction of this entry. (WEAK)
+ DECL_ACCESSORS(function, JSFunction)
+
+ // [code]: The optimized code of this entry. (WEAK)
+ DECL_ACCESSORS(code, Code)
+
+ // [literals]: Array of literals for this entry.
+ DECL_ACCESSORS(literals, FixedArray)
+
+ // [next_by_shared_info]: The next link in the list, when traversing
+ // starting with a SharedFunctionInfo. (NULL if none).
+ DECL_ACCESSORS(next_by_shared_info, OptimizedCodeEntry)
+
+ // [next_by_native_context]: The next link in the list, when traversing
+ // starting with a native context. (NULL if none)
+ DECL_ACCESSORS(next_by_native_context, OptimizedCodeEntry)
+
+ // Casting.
+ static inline OptimizedCodeEntry* cast(Object* obj);
+
+ DECLARE_PRINTER(OptimizedCodeEntry)
+ DECLARE_VERIFIER(OptimizedCodeEntry)
+
+ // Layout description.
+ static const int kNativeContextOffset = HeapObject::kHeaderSize;
+ static const int kFunctionOffset = kNativeContextOffset + kPointerSize;
+ static const int kCodeOffset = kFunctionOffset + kPointerSize;
+ static const int kLiteralsOffset = kCodeOffset + kPointerSize;
+ static const int kNextBySharedInfoOffset =
+ kLiteralsOffset + kPointerSize;
+ static const int kNextByNativeContextOffset =
+ kNextBySharedInfoOffset + kPointerSize;
+ static const int kCacheableOffset = kNextByNativeContextOffset + kPointerSize;
+ static const int kSize = kCacheableOffset + kIntSize;
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kSize);
+
+ typedef FixedBodyDescriptor<kLiteralsOffset,
+ kNextByNativeContextOffset + kPointerSize,
+ kSize> BodyDescriptor;
+
+ // Kills an entry, nulling out its references to native context, function,
+ // code, and literals.
+ void Kill();
+ inline bool cacheable();
+ inline void set_cacheable(bool val);
+
+ private:
+ // Used internally during traversal to skip dead entries.
+ inline bool IsDead() {
+ return function() == NULL || code() == NULL;
+ }
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(OptimizedCodeEntry);
+};
+
+
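A minimal sketch of the lazy-removal scheme the class comment describes (plain C++, not the V8 API; the Entry struct and TraverseAndPrune are illustrative stand-ins): dead entries are unlinked as a list is walked, rather than eagerly when the GC clears their weak references.

#include <cstdio>

struct Entry {
  int id;
  bool dead;    // Stands in for IsDead(): a weak referent was collected.
  Entry* next;  // Stands in for next_by_shared_info / next_by_native_context.
};

// Walks the list, unlinking dead entries in passing and visiting live ones.
static void TraverseAndPrune(Entry** head) {
  Entry** link = head;
  while (*link != nullptr) {
    Entry* entry = *link;
    if (entry->dead) {
      *link = entry->next;  // Unlink lazily; V8 would let GC reclaim it.
    } else {
      std::printf("live entry %d\n", entry->id);
      link = &entry->next;
    }
  }
}

int main() {
  Entry c = {3, false, nullptr};
  Entry b = {2, true, &c};   // Simulates an entry whose code died.
  Entry a = {1, false, &b};
  Entry* head = &a;
  TraverseAndPrune(&head);   // Prints 1 and 3; entry 2 is unlinked.
  return 0;
}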
class JSGeneratorObject: public JSObject {
public:
// [function]: The function corresponding to this generator object.
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 1244493..58d0a24 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -219,11 +219,6 @@
}
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
if (FLAG_break_on_abort) {
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index f52bb43..c136631 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -991,13 +991,6 @@
}
-int OS::NumberOfCores() {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- return info.dwNumberOfProcessors;
-}
-
-
void OS::Abort() {
if (IsDebuggerPresent() || FLAG_break_on_abort) {
DebugBreak();
diff --git a/src/platform.h b/src/platform.h
index 10a7e2c..a42bb5a 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -58,13 +58,6 @@
# endif
#endif
-// GCC specific stuff
-#ifdef __GNUC__
-
-#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
-
-#endif // __GNUC__
-
// Microsoft Visual C++ specific stuff.
#if V8_CC_MSVC
@@ -280,8 +273,6 @@
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
- static int NumberOfCores();
-
// Abort the current process.
static void Abort();
diff --git a/src/version.cc b/src/version.cc
index 176abf9..d047a12 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 21
-#define BUILD_NUMBER 5
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 6
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 094d239..ce66285 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -4915,10 +4915,10 @@
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- __ CmpHeapObject(reg, target);
+ Handle<HeapObject> object = instr->hydrogen()->object();
+ __ CmpHeapObject(reg, object);
DeoptimizeIf(not_equal, instr->environment());
}
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 57938b2..1b4332a 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1789,13 +1789,6 @@
}
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), rax);
return MarkAsCall(new(zone()) LThrow(value), instr);
@@ -1940,9 +1933,9 @@
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 00642ff..e95713d 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -62,12 +62,12 @@
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
@@ -2260,16 +2260,16 @@
};
-class LCheckFunction V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index ebfea0e..c034e48 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -52,6 +52,9 @@
# Boot up memory use is bloated in debug mode.
test-mark-compact/BootUpMemoryUse: PASS, PASS || FAIL if $mode == debug
+# Some CPU profiler tests are flaky.
+test-cpu-profiler/*: PASS || FLAKY
+
##############################################################################
[ $arch == arm ]
diff --git a/test/cctest/test-cpu.cc b/test/cctest/test-cpu.cc
index 1c61ab1..06966c6 100644
--- a/test/cctest/test-cpu.cc
+++ b/test/cctest/test-cpu.cc
@@ -48,3 +48,8 @@
// arm features
CHECK(!cpu.has_vfp3_d32() || cpu.has_vfp3());
}
+
+
+TEST(NumberOfProcessorsOnline) {
+ CHECK_GT(CPU::NumberOfProcessorsOnline(), 0);
+}
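The test above exercises CPU::NumberOfProcessorsOnline, which takes over from the removed OS::NumberOfCores. For reference, a standalone equivalent of the same query in portable C++11 (not V8 code):

#include <cstdio>
#include <thread>

int main() {
  // May return 0 when the count cannot be determined.
  unsigned n = std::thread::hardware_concurrency();
  std::printf("processors online: %u\n", n);
  return 0;
}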
diff --git a/test/cctest/test-platform.cc b/test/cctest/test-platform.cc
index 2d8eb20..079cbd1 100644
--- a/test/cctest/test-platform.cc
+++ b/test/cctest/test-platform.cc
@@ -32,11 +32,6 @@
using namespace ::v8::internal;
-TEST(NumberOfCores) {
- CHECK_GT(OS::NumberOfCores(), 0);
-}
-
-
#ifdef __GNUC__
#define ASM __asm__ __volatile__
diff --git a/test/mjsunit/compiler/escape-analysis.js b/test/mjsunit/compiler/escape-analysis.js
index 9b9341b..f32069c 100644
--- a/test/mjsunit/compiler/escape-analysis.js
+++ b/test/mjsunit/compiler/escape-analysis.js
@@ -173,3 +173,30 @@
delete deopt.deopt;
func(); func();
})();
+
+
+// Test map checks on captured objects.
+(function testMapCheck() {
+ var sum = 0;
+ function getter() { return 27; }
+ function setter(v) { sum += v; }
+ function constructor() {
+ this.x = 23;
+ this.y = 42;
+ }
+ function check(x, y) {
+ var o = new constructor();
+ assertEquals(x, o.x);
+ assertEquals(y, o.y);
+ }
+ var monkey = Object.create(null, {
+ x: { get:getter, set:setter },
+ y: { get:getter, set:setter }
+ });
+ check(23, 42); check(23, 42);
+ %OptimizeFunctionOnNextCall(check);
+ check(23, 42); check(23, 42);
+ constructor.prototype = monkey;
+ check(27, 27); check(27, 27);
+ assertEquals(130, sum);
+})();
diff --git a/test/mjsunit/regress/regress-2843.js b/test/mjsunit/regress/regress-2843.js
new file mode 100644
index 0000000..5b28c2d
--- /dev/null
+++ b/test/mjsunit/regress/regress-2843.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function bailout() { throw "bailout"; }
+var global;
+
+function foo(x, fun) {
+ var a = x + 1;
+ var b = x + 2; // Need another Simulate to fold the first one into.
+ global = true; // Need a side effect to deopt to.
+ fun();
+ return a;
+}
+
+assertThrows("foo(1, bailout)");
+assertThrows("foo(1, bailout)");
+%OptimizeFunctionOnNextCall(foo);
+assertThrows("foo(1, bailout)");
+assertEquals(2, foo(1, function() {}));