Merge v8 from https://chromium.googlesource.com/external/v8.git at 7ff7607c2315ea91e4d13330ce14125e4bb4851a
This commit was generated by merge_from_chromium.py.
Change-Id: Idf1d953e317e85f79df5d9e9cb8b40cc1c187163
diff --git a/ChangeLog b/ChangeLog
index c545d6b..42ee840 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,20 @@
+2013-11-21: Version 3.23.9
+
+ API: Change AdjustAmountOfExternalAllocatedMemory calls to use int64_t
+ instead of intptr_t.
+
+ Remove deprecated v8::SetResourceConstraints without Isolate parameter.
+
+ Remove deprecated v8-defaults.h and defaults.cc.
+ (Chromium issue 312241)
+
+ Make it possible to add more than one piece of embedder data to
+ isolates.
+ (Chromium issue 317398)
+
+ Performance and stability improvements on all platforms.
+
+
2013-11-20: Version 3.23.8
Fixed crashes exposed though fuzzing.
diff --git a/include/v8-defaults.h b/include/v8-defaults.h
deleted file mode 100644
index b55c07f..0000000
--- a/include/v8-defaults.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8_DEFAULTS_H_
-#define V8_V8_DEFAULTS_H_
-
-#include "v8.h"
-
-/**
- * Default configuration support for the V8 JavaScript engine.
- */
-namespace v8 {
-
-V8_DEPRECATED("Use ResourceConstraints::ConfigureDefaults instead",
- bool V8_EXPORT ConfigureResourceConstraintsForCurrentPlatform(
- ResourceConstraints* constraints));
-
-
-V8_DEPRECATED("Use ResourceConstraints::ConfigureDefaults instead",
- bool V8_EXPORT SetDefaultResourceConstraintsForCurrentPlatform());
-
-} // namespace v8
-
-#endif // V8_V8_DEFAULTS_H_
diff --git a/include/v8.h b/include/v8.h
index 2f8af27..6e227dd 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -3841,11 +3841,6 @@
};
-V8_DEPRECATED(
- "Use SetResourceConstraints(isolate, constraints) instead",
- bool V8_EXPORT SetResourceConstraints(ResourceConstraints* constraints));
-
-
/**
* Sets the given ResourceConstraints on the given Isolate.
*/
@@ -4059,17 +4054,37 @@
void Dispose();
/**
- * Associate embedder-specific data with the isolate
+ * Associate embedder-specific data with the isolate. This legacy method
+ * puts the data in the 0th slot. It will be deprecated soon.
*/
V8_INLINE void SetData(void* data);
/**
- * Retrieve embedder-specific data from the isolate.
+ * Associate embedder-specific data with the isolate. |slot| has to be
+ * between 0 and GetNumberOfDataSlots() - 1.
+ */
+ V8_INLINE void SetData(uint32_t slot, void* data);
+
+ /**
+ * Retrieve embedder-specific data from the isolate. This legacy method
+ * retrieves the data from slot 0. It will be deprecated soon.
* Returns NULL if SetData has never been called.
*/
V8_INLINE void* GetData();
/**
+ * Retrieve embedder-specific data from the isolate.
+ * Returns NULL if SetData has never been called for the given |slot|.
+ */
+ V8_INLINE void* GetData(uint32_t slot);
+
+ /**
+ * Returns the maximum number of available embedder data slots. Valid slots
+ * are in the range of 0 - GetNumberOfDataSlots() - 1.
+ */
+ V8_INLINE static uint32_t GetNumberOfDataSlots();
+
+ /**
* Get statistics about the heap memory usage.
*/
void GetHeapStatistics(HeapStatistics* heap_statistics);
@@ -4087,7 +4102,7 @@
* kept alive by JavaScript objects.
* \returns the adjusted value.
*/
- intptr_t AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes);
+ int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
/**
* Returns heap profiler for this isolate. Will return NULL until the isolate
@@ -4670,8 +4685,8 @@
V8_DEPRECATED(
"Use Isolate::AdjustAmountOfExternalAllocatedMemory instead",
- static intptr_t AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes));
+ static int64_t AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes));
/**
* Forcefully terminate the current thread of JavaScript execution
@@ -5453,8 +5468,8 @@
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalAsciiRepresentationTag = 0x06;
- static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize;
- static const int kIsolateRootsOffset = 3 * kApiPointerSize;
+ static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
+ static const int kIsolateRootsOffset = 5 * kApiPointerSize;
static const int kUndefinedValueRootIndex = 5;
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
@@ -5478,6 +5493,8 @@
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
+ static const uint32_t kNumIsolateDataSlots = 4;
+
V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
@@ -5541,15 +5558,17 @@
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
- V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, void* data) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset;
+ V8_INLINE static void SetEmbedderData(v8::Isolate *isolate,
+ uint32_t slot,
+ void *data) {
+ uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) +
+ kIsolateEmbedderDataOffset + slot * kApiPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
- V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate) {
+ V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate, uint32_t slot) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset;
+ kIsolateEmbedderDataOffset + slot * kApiPointerSize;
return *reinterpret_cast<void**>(addr);
}
@@ -6475,13 +6494,31 @@
void Isolate::SetData(void* data) {
typedef internal::Internals I;
- I::SetEmbedderData(this, data);
+ I::SetEmbedderData(this, 0, data);
}
void* Isolate::GetData() {
typedef internal::Internals I;
- return I::GetEmbedderData(this);
+ return I::GetEmbedderData(this, 0);
+}
+
+
+void Isolate::SetData(uint32_t slot, void* data) {
+ typedef internal::Internals I;
+ I::SetEmbedderData(this, slot, data);
+}
+
+
+void* Isolate::GetData(uint32_t slot) {
+ typedef internal::Internals I;
+ return I::GetEmbedderData(this, slot);
+}
+
+
+uint32_t Isolate::GetNumberOfDataSlots() {
+ typedef internal::Internals I;
+ return I::kNumIsolateDataSlots;
}
diff --git a/src/allocation-inl.h b/src/allocation-inl.h
deleted file mode 100644
index d32db4b..0000000
--- a/src/allocation-inl.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ALLOCATION_INL_H_
-#define V8_ALLOCATION_INL_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-void* PreallocatedStorageAllocationPolicy::New(size_t size) {
- return Isolate::Current()->PreallocatedStorageNew(size);
-}
-
-
-void PreallocatedStorageAllocationPolicy::Delete(void* p) {
- return Isolate::Current()->PreallocatedStorageDelete(p);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ALLOCATION_INL_H_
diff --git a/src/allocation.cc b/src/allocation.cc
index 94aaad3..69edf69 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -100,24 +100,4 @@
return result;
}
-
-void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
- next_ = other->next_;
- other->next_->previous_ = this;
- previous_ = other;
- other->next_ = this;
-}
-
-
-void PreallocatedStorage::Unlink() {
- next_->previous_ = previous_;
- previous_->next_ = next_;
-}
-
-
-PreallocatedStorage::PreallocatedStorage(size_t size)
- : size_(size) {
- previous_ = next_ = this;
-}
-
} } // namespace v8::internal
diff --git a/src/allocation.h b/src/allocation.h
index 45bde4c..03cc8f5 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -109,34 +109,6 @@
};
-// Allocation policy for allocating in preallocated space.
-// Used as an allocation policy for ScopeInfo when generating
-// stack traces.
-class PreallocatedStorage {
- public:
- explicit PreallocatedStorage(size_t size);
- size_t size() { return size_; }
-
- private:
- size_t size_;
- PreallocatedStorage* previous_;
- PreallocatedStorage* next_;
-
- void LinkTo(PreallocatedStorage* other);
- void Unlink();
-
- friend class Isolate;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
-};
-
-
-struct PreallocatedStorageAllocationPolicy {
- INLINE(void* New(size_t size));
- INLINE(static void Delete(void* ptr));
-};
-
-
} } // namespace v8::internal
#endif // V8_ALLOCATION_H_
diff --git a/src/api.cc b/src/api.cc
index 8919cca..a448e19 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -610,13 +610,6 @@
}
-bool SetResourceConstraints(ResourceConstraints* constraints) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
- return SetResourceConstraints(reinterpret_cast<Isolate*>(isolate),
- constraints);
-}
-
-
bool SetResourceConstraints(Isolate* v8_isolate,
ResourceConstraints* constraints) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -6396,14 +6389,14 @@
}
-intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes) {
+int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
return heap->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
}
-intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
+int64_t V8::AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized()) {
return 0;
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index ae50d79..19fa9f2 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -844,7 +844,7 @@
// Perform prologue operations usually performed by the young code stub.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
__ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
@@ -1177,11 +1177,13 @@
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
@@ -1341,7 +1343,8 @@
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(3 * kPointerSize));
+ __ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1351,7 +1354,8 @@
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
+ __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
@@ -1438,7 +1442,9 @@
// r3: code entry to call
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
- __ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
+ // Adjust for frame.
+ __ sub(r2, r2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 85cb610..238d34e 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -851,7 +851,8 @@
PredictableCodeSizeScope scope(patcher.masm(), *length);
patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
patcher.masm()->nop(ip.code());
- patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
+ patcher.masm()->add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
initialized = true;
}
return byte_sequence;
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 64a718e..6c9ec4a 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -102,7 +102,8 @@
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index d95055c..64a9fdf 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1635,8 +1635,7 @@
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- int depth = 1;
- expr->BuildConstantProperties(isolate(), &depth);
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -1651,7 +1650,7 @@
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- depth > 1 || Serializer::enabled() ||
+ expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
@@ -1770,8 +1769,7 @@
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- int depth = 1;
- expr->BuildConstantElements(isolate(), &depth);
+ expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1795,7 +1793,7 @@
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (depth > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ Push(r3, r2, r1);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index a6644af..a1e892b 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1483,10 +1483,6 @@
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- return AssignEnvironment(DefineAsRegister(mod));
} else if (CpuFeatures::IsSupported(SUDIV)) {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right));
@@ -2554,15 +2550,8 @@
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length;
- LOperand* index;
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- length = UseRegisterOrConstant(instr->length());
- index = UseOrConstant(instr->index());
- } else {
- length = UseTempRegister(instr->length());
- index = UseRegisterAtStart(instr->index());
- }
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 0d4d805..0aa8197 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -260,7 +260,7 @@
__ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -325,7 +325,7 @@
ASSERT(info()->IsStub());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, ip);
}
@@ -1133,36 +1133,6 @@
__ bind(&left_is_not_negative);
__ and_(result_reg, left_reg, Operand(divisor - 1));
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmp(right_reg, Operand(divisor));
- DeoptimizeIf(ne, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ cmp(left_reg, Operand::Zero());
- __ b(pl, &left_is_not_negative);
- __ rsb(result_reg, left_reg, Operand::Zero());
- __ and_(result_reg, result_reg, Operand(divisor - 1));
- __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ b(&done);
- }
-
- __ bind(&left_is_not_negative);
- __ and_(result_reg, left_reg, Operand(divisor - 1));
- __ bind(&done);
-
} else if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
@@ -3216,20 +3186,35 @@
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them add one more.
+ if (instr->length()->IsConstantOperand()) {
int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ ldr(result, MemOperand(arguments, index * kPointerSize));
- } else {
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ ldr(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ rsb(result, index, Operand(const_length + 1));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ sub(result, length, Operand(loc));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+ } else {
+ __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ }
+ } else {
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, length, index);
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ __ sub(result, length, index);
+ __ add(result, result, Operand(1));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
}
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index acd398a..47d42a3 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -867,7 +867,7 @@
stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
- add(fp, sp, Operand(2 * kPointerSize));
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
} else {
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
@@ -883,7 +883,7 @@
stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
nop(ip.code());
// Adjust FP to point to saved FP.
- add(fp, sp, Operand(2 * kPointerSize));
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
}
@@ -896,7 +896,9 @@
push(ip);
mov(ip, Operand(CodeObject()));
push(ip);
- add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
+ // Adjust FP to point to saved FP.
+ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
diff --git a/src/ast.cc b/src/ast.cc
index adf0fb8..3ca1449 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -262,7 +262,7 @@
}
-void ObjectLiteral::BuildConstantProperties(Isolate* isolate, int* depth) {
+void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
if (!constant_properties_.is_null()) return;
// Allocate a fixed array to hold all the constant properties.
@@ -283,9 +283,8 @@
}
MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
if (m_literal != NULL) {
- int inner_depth = 1;
- m_literal->BuildConstants(isolate, &inner_depth);
- if (inner_depth >= depth_acc) depth_acc = inner_depth + 1;
+ m_literal->BuildConstants(isolate);
+ if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
}
// Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
@@ -334,11 +333,11 @@
fast_elements_ =
(max_element_index <= 32) || ((2 * elements) >= max_element_index);
set_is_simple(is_simple);
- if (depth != NULL) *depth = depth_acc;
+ set_depth(depth_acc);
}
-void ArrayLiteral::BuildConstantElements(Isolate* isolate, int* depth) {
+void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
if (!constant_elements_.is_null()) return;
// Allocate a fixed array to hold all the object literals.
@@ -355,9 +354,10 @@
Expression* element = values()->at(i);
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
if (m_literal != NULL) {
- int inner_depth = 1;
- m_literal->BuildConstants(isolate, &inner_depth);
- if (inner_depth + 1 > depth_acc) depth_acc = inner_depth + 1;
+ m_literal->BuildConstants(isolate);
+ if (m_literal->depth() + 1 > depth_acc) {
+ depth_acc = m_literal->depth() + 1;
+ }
}
Handle<Object> boilerplate_value = GetBoilerplateValue(element, isolate);
if (boilerplate_value->IsTheHole()) {
@@ -392,7 +392,7 @@
constant_elements_ = literals;
set_is_simple(is_simple);
- if (depth != NULL) *depth = depth_acc;
+ set_depth(depth_acc);
}
@@ -408,14 +408,15 @@
}
-void MaterializedLiteral::BuildConstants(Isolate* isolate, int* depth) {
+void MaterializedLiteral::BuildConstants(Isolate* isolate) {
if (IsArrayLiteral()) {
- return AsArrayLiteral()->BuildConstantElements(isolate, depth);
+ return AsArrayLiteral()->BuildConstantElements(isolate);
}
if (IsObjectLiteral()) {
- return AsObjectLiteral()->BuildConstantProperties(isolate, depth);
+ return AsObjectLiteral()->BuildConstantProperties(isolate);
}
ASSERT(IsRegExpLiteral());
+ ASSERT(depth() >= 1); // Depth should be initialized.
}
diff --git a/src/ast.h b/src/ast.h
index 2a86696..e3fc053 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1409,13 +1409,20 @@
int literal_index() { return literal_index_; }
+ int depth() const {
+ // only callable after initialization.
+ ASSERT(depth_ >= 1);
+ return depth_;
+ }
+
protected:
MaterializedLiteral(Isolate* isolate,
int literal_index,
int pos)
: Expression(isolate, pos),
literal_index_(literal_index),
- is_simple_(false) {}
+ is_simple_(false),
+ depth_(0) {}
// A materialized literal is simple if the values consist of only
// constants and simple object and array literals.
@@ -1423,8 +1430,13 @@
void set_is_simple(bool is_simple) { is_simple_ = is_simple; }
friend class CompileTimeValue;
+ void set_depth(int depth) {
+ ASSERT(depth >= 1);
+ depth_ = depth;
+ }
+
// Populate the constant properties/elements fixed array.
- void BuildConstants(Isolate* isolate, int* depth);
+ void BuildConstants(Isolate* isolate);
friend class ArrayLiteral;
friend class ObjectLiteral;
@@ -1438,6 +1450,7 @@
private:
int literal_index_;
bool is_simple_;
+ int depth_;
};
@@ -1505,7 +1518,7 @@
static bool IsBoilerplateProperty(Property* property);
// Populate the constant properties fixed array.
- void BuildConstantProperties(Isolate* isolate, int* depth = NULL);
+ void BuildConstantProperties(Isolate* isolate);
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@@ -1564,7 +1577,9 @@
int pos)
: MaterializedLiteral(isolate, literal_index, pos),
pattern_(pattern),
- flags_(flags) {}
+ flags_(flags) {
+ set_depth(1);
+ }
private:
Handle<String> pattern_;
@@ -1587,7 +1602,7 @@
}
// Populate the constant elements fixed array.
- void BuildConstantElements(Isolate* isolate, int* depth = NULL);
+ void BuildConstantElements(Isolate* isolate);
protected:
ArrayLiteral(Isolate* isolate,
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index a992dcb..62e19e1 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -896,17 +896,17 @@
if_leftisstring.If<HIsStringAndBranch>(left);
if_leftisstring.Then();
{
- Push(AddInstruction(BuildBinaryOperation(
+ Push(BuildBinaryOperation(
stub->operation(), left, right,
handle(Type::String(), isolate()), right_type,
- result_type, stub->fixed_right_arg(), true)));
+ result_type, stub->fixed_right_arg(), true));
}
if_leftisstring.Else();
{
- Push(AddInstruction(BuildBinaryOperation(
+ Push(BuildBinaryOperation(
stub->operation(), left, right,
left_type, right_type, result_type,
- stub->fixed_right_arg(), true)));
+ stub->fixed_right_arg(), true));
}
if_leftisstring.End();
result = Pop();
@@ -915,26 +915,26 @@
if_rightisstring.If<HIsStringAndBranch>(right);
if_rightisstring.Then();
{
- Push(AddInstruction(BuildBinaryOperation(
+ Push(BuildBinaryOperation(
stub->operation(), left, right,
left_type, handle(Type::String(), isolate()),
- result_type, stub->fixed_right_arg(), true)));
+ result_type, stub->fixed_right_arg(), true));
}
if_rightisstring.Else();
{
- Push(AddInstruction(BuildBinaryOperation(
+ Push(BuildBinaryOperation(
stub->operation(), left, right,
left_type, right_type, result_type,
- stub->fixed_right_arg(), true)));
+ stub->fixed_right_arg(), true));
}
if_rightisstring.End();
result = Pop();
}
} else {
- result = AddInstruction(BuildBinaryOperation(
+ result = BuildBinaryOperation(
stub->operation(), left, right,
left_type, right_type, result_type,
- stub->fixed_right_arg(), true));
+ stub->fixed_right_arg(), true);
}
// If we encounter a generic argument, the number conversion is
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 166d46d..051dd45 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -586,6 +586,14 @@
ASSERT(result_state_ <= (has_int_result() ? INT32 : NUMBER) ||
op_ == Token::ADD);
+ // Reset overwrite mode unless we can actually make use of it, or may be able
+ // to make use of it at some point in the future.
+ if ((mode_ == OVERWRITE_LEFT && left_state_ > NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT && right_state_ > NUMBER) ||
+ result_state_ > NUMBER) {
+ mode_ = NO_OVERWRITE;
+ }
+
if (old_state == GetExtraICState()) {
// Tagged operations can lead to non-truncating HChanges
if (left->IsUndefined() || left->IsBoolean()) {
diff --git a/src/d8.cc b/src/d8.cc
index 339cc32..5b128c0 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -49,7 +49,6 @@
#endif // !V8_SHARED
#ifdef V8_SHARED
-#include "../include/v8-defaults.h"
#include "../include/v8-testing.h"
#endif // V8_SHARED
@@ -68,7 +67,6 @@
#include "natives.h"
#include "platform.h"
#include "v8.h"
-#include "v8-defaults.h"
#endif // V8_SHARED
#if !defined(_WIN32) && !defined(_WIN64)
@@ -92,15 +90,15 @@
public:
explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
HandleScope scope(isolate);
- isolate->SetData(this);
+ isolate->SetData(0, this);
}
~PerIsolateData() {
- isolate_->SetData(NULL); // Not really needed, just to be sure...
+ isolate_->SetData(0, NULL); // Not really needed, just to be sure...
}
inline static PerIsolateData* Get(Isolate* isolate) {
- return reinterpret_cast<PerIsolateData*>(isolate->GetData());
+ return reinterpret_cast<PerIsolateData*>(isolate->GetData(0));
}
class RealmScope {
diff --git a/src/defaults.cc b/src/defaults.cc
deleted file mode 100644
index 6bfbfef..0000000
--- a/src/defaults.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The GYP based build ends up defining USING_V8_SHARED when compiling this
-// file.
-#undef USING_V8_SHARED
-#include "../include/v8-defaults.h"
-
-#include "platform.h"
-#include "globals.h"
-#include "v8.h"
-
-namespace v8 {
-
-
-// TODO(rmcilroy): Remove this function once it is no longer used in Chrome.
-bool ConfigureResourceConstraintsForCurrentPlatform(
- ResourceConstraints* constraints) {
- if (constraints == NULL) {
- return false;
- }
-
- int lump_of_memory = (i::kPointerSize / 4) * i::MB;
-
- // The young_space_size should be a power of 2 and old_generation_size should
- // be a multiple of Page::kPageSize.
-#if V8_OS_ANDROID
- constraints->set_max_young_space_size(8 * lump_of_memory);
- constraints->set_max_old_space_size(256 * lump_of_memory);
- constraints->set_max_executable_size(192 * lump_of_memory);
-#else
- constraints->set_max_young_space_size(16 * lump_of_memory);
- constraints->set_max_old_space_size(700 * lump_of_memory);
- constraints->set_max_executable_size(256 * lump_of_memory);
-#endif
- return true;
-}
-
-
-// TODO(rmcilroy): Remove this function once it is no longer used in Chrome.
-bool SetDefaultResourceConstraintsForCurrentPlatform() {
- ResourceConstraints constraints;
- if (!ConfigureResourceConstraintsForCurrentPlatform(&constraints))
- return false;
- return SetResourceConstraints(&constraints);
-}
-
-} // namespace v8
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 2f4d7dc..e39c345 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -181,7 +181,8 @@
// Always use the actual stack slots when calculating the fp to sp
// delta adding two for the function and context.
unsigned stack_slots = code->stack_slots();
- unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
+ unsigned fp_to_sp_delta = (stack_slots * kPointerSize) +
+ StandardFrameConstants::kFixedFrameSizeFromFp;
Deoptimizer* deoptimizer = new Deoptimizer(isolate,
function,
@@ -890,7 +891,8 @@
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
// is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(fp_reg.code()) - (2 * kPointerSize) -
+ top_address = input_->GetRegister(fp_reg.code()) -
+ StandardFrameConstants::kFixedFrameSizeFromFp -
height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
@@ -1303,14 +1305,14 @@
" translating %s stub => height=%u\n", kind, height_in_bytes);
}
- // We need 1 stack entry for the return address + 4 stack entries from
- // StackFrame::INTERNAL (FP, context, frame type, code object, see
+ // We need 1 stack entry for the return address and enough entries for the
+ // StackFrame::INTERNAL (FP, context, frame type and code object - see
// MacroAssembler::EnterFrame). For a setter stub frame we need one additional
// entry for the implicit return value, see
// StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = (kPCOnStackSize / kPointerSize) +
- (kFPOnStackSize / kPointerSize) + 3 +
- (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_entries =
+ (StandardFrameConstants::kFixedFrameSize / kPointerSize) + 1 +
+ (is_setter_stub_frame ? 1 : 0);
unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
@@ -1483,7 +1485,7 @@
// context and function slots.
Register fp_reg = StubFailureTrampolineFrame::fp_register();
intptr_t top_address = input_->GetRegister(fp_reg.code()) -
- (2 * kPointerSize) - height_in_bytes;
+ StandardFrameConstants::kFixedFrameSizeFromFp - height_in_bytes;
output_frame->SetTop(top_address);
// Read caller's PC (JSFunction continuation) from the input frame.
@@ -2454,8 +2456,9 @@
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
// The fp-to-sp delta already takes the context and the function
- // into account so we have to avoid double counting them (-2).
- unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
+ // into account so we have to avoid double counting them.
+ unsigned result = fixed_size + fp_to_sp_delta_ -
+ StandardFrameConstants::kFixedFrameSizeFromFp;
#ifdef DEBUG
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
diff --git a/src/extensions/statistics-extension.cc b/src/extensions/statistics-extension.cc
index 651d99d..586b2e3 100644
--- a/src/extensions/statistics-extension.cc
+++ b/src/extensions/statistics-extension.cc
@@ -58,6 +58,14 @@
}
+static void AddNumber64(v8::Local<v8::Object> object,
+ int64_t value,
+ const char* name) {
+ object->Set(v8::String::New(name),
+ v8::Number::New(static_cast<double>(value)));
+}
+
+
void StatisticsExtension::GetCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
@@ -145,8 +153,8 @@
"lo_space_available_bytes");
AddNumber(result, heap->lo_space()->CommittedMemory(),
"lo_space_commited_bytes");
- AddNumber(result, heap->amount_of_external_allocated_memory(),
- "amount_of_external_allocated_memory");
+ AddNumber64(result, heap->amount_of_external_allocated_memory(),
+ "amount_of_external_allocated_memory");
args.GetReturnValue().Set(result);
}
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a5cf326..62cb307 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -202,7 +202,7 @@
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(compiled_keyed_dictionary_loads, false,
+DEFINE_bool(compiled_keyed_dictionary_loads, true,
"use optimizing compiler to generate keyed dictionary load stubs")
DEFINE_bool(clever_optimizations, true,
"Optimize object size, Array shift, DOM strings and string +")
@@ -600,8 +600,6 @@
"abort program (dump core) when an uncaught exception is thrown")
DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
-DEFINE_bool(preallocate_message_memory, false,
- "preallocate some memory to build stack traces.")
DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
diff --git a/src/frames.cc b/src/frames.cc
index 4abacad..912c822 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -38,8 +38,6 @@
#include "string-stream.h"
#include "vm-state-inl.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
diff --git a/src/frames.h b/src/frames.h
index d2dbfe2..230144d 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -170,14 +170,15 @@
// context and function.
// StandardFrame::IterateExpressions assumes that kContextOffset is the last
// object pointer.
- static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
- 2 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kFPOnStackSize;
- static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+ static const int kFixedFrameSizeFromFp = 2 * kPointerSize;
+ static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
+ kFixedFrameSizeFromFp;
+ static const int kExpressionsOffset = -3 * kPointerSize;
+ static const int kMarkerOffset = -2 * kPointerSize;
+ static const int kContextOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kFPOnStackSize;
+ static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
};
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 168aa50..9d57c99 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -541,10 +541,10 @@
}
-intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes) {
+int64_t Heap::AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes) {
ASSERT(HasBeenSetUp());
- intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
+ int64_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes > 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
@@ -554,7 +554,7 @@
amount_of_external_allocated_memory_ = 0;
amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
- intptr_t amount_since_last_global_gc = PromotedExternalMemorySize();
+ int64_t amount_since_last_global_gc = PromotedExternalMemorySize();
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
}
@@ -573,9 +573,9 @@
PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
"amount=%6" V8_PTR_PREFIX "d KB, since_gc=%6" V8_PTR_PREFIX "d KB, "
"isolate=0x%08" V8PRIxPTR ".\n",
- change_in_bytes / KB,
- amount_of_external_allocated_memory_ / KB,
- PromotedExternalMemorySize() / KB,
+ static_cast<intptr_t>(change_in_bytes / KB),
+ static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB),
+ static_cast<intptr_t>(PromotedExternalMemorySize() / KB),
reinterpret_cast<intptr_t>(isolate()));
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index b45e97b..a4ec5a9 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -27,7 +27,6 @@
#include "v8.h"
-#include "deoptimizer.h"
#include "heap-profiler.h"
#include "heap-snapshot-generator-inl.h"
@@ -160,7 +159,6 @@
StartHeapObjectsTracking();
heap()->DisableInlineAllocation();
is_tracking_allocations_ = true;
- DropCompiledCode();
snapshots_->UpdateHeapObjectsMap();
}
@@ -169,45 +167,6 @@
StopHeapObjectsTracking();
heap()->EnableInlineAllocation();
is_tracking_allocations_ = false;
- DropCompiledCode();
-}
-
-
-void HeapProfiler::DropCompiledCode() {
- Isolate* isolate = heap()->isolate();
- HandleScope scope(isolate);
-
- if (isolate->concurrent_recompilation_enabled()) {
- isolate->optimizing_compiler_thread()->Flush();
- }
-
- Deoptimizer::DeoptimizeAll(isolate);
-
- Handle<Code> lazy_compile =
- Handle<Code>(isolate->builtins()->builtin(Builtins::kLazyCompile));
-
- heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "switch allocations tracking");
-
- DisallowHeapAllocation no_allocation;
-
- HeapIterator iterator(heap());
- HeapObject* obj = NULL;
- while (((obj = iterator.next()) != NULL)) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- SharedFunctionInfo* shared = function->shared();
-
- if (!shared->allows_lazy_compilation()) continue;
- if (!shared->script()->IsScript()) continue;
-
- Code::Kind kind = function->code()->kind();
- if (kind == Code::FUNCTION || kind == Code::BUILTIN) {
- function->set_code(*lazy_compile);
- shared->set_code(*lazy_compile);
- }
- }
- }
}
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 06345fe..f2e8100 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -90,8 +90,6 @@
return snapshots_->FindUntrackedObjects();
}
- void DropCompiledCode();
-
private:
Heap* heap() const { return snapshots_->heap(); }
diff --git a/src/heap.cc b/src/heap.cc
index b75f751..b5fe184 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -412,7 +412,7 @@
this->Available() / KB,
this->CommittedMemory() / KB);
PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
- amount_of_external_allocated_memory_ / KB);
+ static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}
@@ -6591,7 +6591,7 @@
}
-intptr_t Heap::PromotedExternalMemorySize() {
+int64_t Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
return amount_of_external_allocated_memory_
diff --git a/src/heap.h b/src/heap.h
index 752a1ed..ee01c22 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1475,8 +1475,8 @@
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
- inline intptr_t AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes);
+ inline int64_t AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes);
// This is only needed for testing high promotion mode.
void SetNewSpaceHighPromotionModeActive(bool mode) {
@@ -1495,7 +1495,10 @@
}
inline intptr_t PromotedTotalSize() {
- return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
+ if (total < 0) return 0;
+ return static_cast<intptr_t>(total);
}
inline intptr_t OldGenerationSpaceAvailable() {
@@ -1780,7 +1783,7 @@
bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
- intptr_t amount_of_external_allocated_memory() {
+ int64_t amount_of_external_allocated_memory() {
return amount_of_external_allocated_memory_;
}
@@ -1906,7 +1909,7 @@
int gc_post_processing_depth_;
// Returns the amount of external memory registered since last global gc.
- intptr_t PromotedExternalMemorySize();
+ int64_t PromotedExternalMemorySize();
unsigned int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
@@ -1960,10 +1963,10 @@
// The amount of external memory registered through the API kept alive
// by global handles
- intptr_t amount_of_external_allocated_memory_;
+ int64_t amount_of_external_allocated_memory_;
// Caches the amount of external memory registered at the last global gc.
- intptr_t amount_of_external_allocated_memory_at_last_global_gc_;
+ int64_t amount_of_external_allocated_memory_at_last_global_gc_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 84949c0..e816471 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -3930,8 +3930,7 @@
HInstruction* HMod::New(Zone* zone,
HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg) {
+ HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -3950,7 +3949,7 @@
}
}
}
- return new(zone) HMod(context, left, right, fixed_right_arg);
+ return new(zone) HMod(context, left, right);
}
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 1423c72..73c1d7d 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -4901,10 +4901,7 @@
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg);
-
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+ HValue* right);
bool HasPowerOf2Divisor() {
if (right()->IsConstant() &&
@@ -4938,15 +4935,10 @@
private:
HMod(HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg)
- : HArithmeticBinaryOperation(context, left, right),
- fixed_right_arg_(fixed_right_arg) {
+ HValue* right) : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
-
- const Maybe<int> fixed_right_arg_;
};
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 22da947..f7b3100 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1273,6 +1273,20 @@
}
+HValue* HGraphBuilder::BuildCheckString(
+ HValue* object, const char* failure_reason) {
+ if (!object->type().IsString()) {
+ ASSERT(!object->IsConstant() ||
+ !HConstant::cast(object)->HasStringValue());
+ IfBuilder if_isstring(this);
+ if_isstring.If<HIsStringAndBranch>(object);
+ if_isstring.Then();
+ if_isstring.ElseDeopt(failure_reason);
+ }
+ return object;
+}
+
+
HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
if (object->type().IsJSObject()) return object;
return Add<HWrapReceiver>(object, function);
@@ -1301,7 +1315,7 @@
capacity_checker.Then();
HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
- HValue* max_capacity = Add<HAdd>(current_capacity, max_gap);
+ HValue* max_capacity = AddUncasted<HAdd>(current_capacity, max_gap);
IfBuilder key_checker(this);
key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT);
key_checker.Then();
@@ -1418,14 +1432,14 @@
int32_t offset = SeededNumberDictionary::GetProbeOffset(current_probe);
HValue* raw_index = (current_probe == 0)
? hash
- : Add<HAdd>(hash, Add<HConstant>(offset));
- raw_index = Add<HBitwise>(Token::BIT_AND, raw_index, mask);
+ : AddUncasted<HAdd>(hash, Add<HConstant>(offset));
+ raw_index = AddUncasted<HBitwise>(Token::BIT_AND, raw_index, mask);
int32_t entry_size = SeededNumberDictionary::kEntrySize;
- raw_index = Add<HMul>(raw_index, Add<HConstant>(entry_size));
+ raw_index = AddUncasted<HMul>(raw_index, Add<HConstant>(entry_size));
raw_index->ClearFlag(HValue::kCanOverflow);
int32_t base_offset = SeededNumberDictionary::kElementsStartIndex;
- HValue* key_index = Add<HAdd>(raw_index, Add<HConstant>(base_offset));
+ HValue* key_index = AddUncasted<HAdd>(raw_index, Add<HConstant>(base_offset));
key_index->ClearFlag(HValue::kCanOverflow);
HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
@@ -1450,8 +1464,8 @@
{
// Key at current probe matches. Details must be zero, otherwise the
// dictionary element requires special handling.
- HValue* details_index = Add<HAdd>(raw_index,
- Add<HConstant>(base_offset + 2));
+ HValue* details_index = AddUncasted<HAdd>(
+ raw_index, Add<HConstant>(base_offset + 2));
details_index->ClearFlag(HValue::kCanOverflow);
HValue* details = Add<HLoadKeyed>(elements, details_index,
@@ -1467,8 +1481,8 @@
{
// Key matches and details are zero --> fast case. Load and return the
// value.
- HValue* result_index = Add<HAdd>(raw_index,
- Add<HConstant>(base_offset + 1));
+ HValue* result_index = AddUncasted<HAdd>(
+ raw_index, Add<HConstant>(base_offset + 1));
result_index->ClearFlag(HValue::kCanOverflow);
Push(Add<HLoadKeyed>(elements, result_index,
@@ -1486,33 +1500,33 @@
HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
HValue* seed = Add<HConstant>(seed_value);
- HValue* hash = Add<HBitwise>(Token::BIT_XOR, index, seed);
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, index, seed);
// hash = ~hash + (hash << 15);
- HValue* shifted_hash = Add<HShl>(hash, Add<HConstant>(15));
- HValue* not_hash = Add<HBitwise>(Token::BIT_XOR, hash,
- graph()->GetConstantMinus1());
- hash = Add<HAdd>(shifted_hash, not_hash);
+ HValue* shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(15));
+ HValue* not_hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash,
+ graph()->GetConstantMinus1());
+ hash = AddUncasted<HAdd>(shifted_hash, not_hash);
// hash = hash ^ (hash >> 12);
- shifted_hash = Add<HShr>(hash, Add<HConstant>(12));
- hash = Add<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(12));
+ hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
// hash = hash + (hash << 2);
- shifted_hash = Add<HShl>(hash, Add<HConstant>(2));
- hash = Add<HAdd>(hash, shifted_hash);
+ shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(2));
+ hash = AddUncasted<HAdd>(hash, shifted_hash);
// hash = hash ^ (hash >> 4);
- shifted_hash = Add<HShr>(hash, Add<HConstant>(4));
- hash = Add<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(4));
+ hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
// hash = hash * 2057;
- hash = Add<HMul>(hash, Add<HConstant>(2057));
+ hash = AddUncasted<HMul>(hash, Add<HConstant>(2057));
hash->ClearFlag(HValue::kCanOverflow);
// hash = hash ^ (hash >> 16);
- shifted_hash = Add<HShr>(hash, Add<HConstant>(16));
- return Add<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(16));
+ return AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
}
@@ -1541,6 +1555,13 @@
Handle<Type> type) {
NoObservableSideEffectsScope scope(this);
+ // Convert constant numbers at compile time.
+ if (object->IsConstant() && HConstant::cast(object)->HasNumberValue()) {
+ Handle<Object> number = HConstant::cast(object)->handle(isolate());
+ Handle<String> result = isolate()->factory()->NumberToString(number);
+ return Add<HConstant>(result);
+ }
+
// Create a joinable continuation.
HIfContinuation found(graph()->CreateBasicBlock(),
graph()->CreateBasicBlock());
@@ -1562,10 +1583,10 @@
if_objectissmi.Then();
{
// Compute hash for smi similar to smi_get_hash().
- HValue* hash = Add<HBitwise>(Token::BIT_AND, object, mask);
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_AND, object, mask);
// Load the key.
- HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
@@ -1596,11 +1617,11 @@
object, HObjectAccess::ForHeapNumberValueLowestBits());
HValue* high = Add<HLoadNamedField>(
object, HObjectAccess::ForHeapNumberValueHighestBits());
- HValue* hash = Add<HBitwise>(Token::BIT_XOR, low, high);
- hash = Add<HBitwise>(Token::BIT_AND, hash, mask);
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, low, high);
+ hash = AddUncasted<HBitwise>(Token::BIT_AND, hash, mask);
// Load the key.
- HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
@@ -1646,7 +1667,7 @@
// Load the value in case of cache hit.
HValue* key_index = Pop();
- HValue* value_index = Add<HAdd>(key_index, graph()->GetConstant1());
+ HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
Push(Add<HLoadKeyed>(number_string_cache, value_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE));
@@ -1671,14 +1692,14 @@
STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
HValue* size = length;
if (encoding == String::TWO_BYTE_ENCODING) {
- size = Add<HShl>(length, graph()->GetConstant1());
+ size = AddUncasted<HShl>(length, graph()->GetConstant1());
size->ClearFlag(HValue::kCanOverflow);
size->SetFlag(HValue::kUint32);
}
- size = Add<HAdd>(size, Add<HConstant>(static_cast<int32_t>(
+ size = AddUncasted<HAdd>(size, Add<HConstant>(static_cast<int32_t>(
SeqString::kHeaderSize + kObjectAlignmentMask)));
size->ClearFlag(HValue::kCanOverflow);
- size = Add<HBitwise>(
+ size = AddUncasted<HBitwise>(
Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
~kObjectAlignmentMask)));
return size;
@@ -1697,9 +1718,9 @@
LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
{
- HValue* src_index = Add<HAdd>(src_offset, index);
+ HValue* src_index = AddUncasted<HAdd>(src_offset, index);
HValue* value = Add<HSeqStringGetChar>(src_encoding, src, src_index);
- HValue* dst_index = Add<HAdd>(dst_offset, index);
+ HValue* dst_index = AddUncasted<HAdd>(dst_offset, index);
Add<HSeqStringSetChar>(dst_encoding, dst, dst_index, value);
}
loop.EndBody();
@@ -1744,11 +1765,11 @@
HObjectAccess::ForMapInstanceType());
// Compute difference of instance types.
- HValue* xored_instance_types = Add<HBitwise>(
+ HValue* xored_instance_types = AddUncasted<HBitwise>(
Token::BIT_XOR, left_instance_type, right_instance_type);
// Compute the length of the resulting string.
- HValue* length = Add<HAdd>(left_length, right_length);
+ HValue* length = AddUncasted<HAdd>(left_length, right_length);
// Check if we should create a cons string.
IfBuilder if_createcons(this);
@@ -1765,7 +1786,7 @@
CONS_STRING_TYPE);
// Compute the intersection of instance types.
- HValue* anded_instance_types = Add<HBitwise>(
+ HValue* anded_instance_types = AddUncasted<HBitwise>(
Token::BIT_AND, left_instance_type, right_instance_type);
// We create a one-byte cons string if
@@ -1781,7 +1802,7 @@
STATIC_ASSERT(kOneByteStringTag != 0);
STATIC_ASSERT(kOneByteDataHintMask != 0);
if_onebyte.If<HCompareNumericAndBranch>(
- Add<HBitwise>(
+ AddUncasted<HBitwise>(
Token::BIT_AND, anded_instance_types,
Add<HConstant>(static_cast<int32_t>(
kStringEncodingMask | kOneByteDataHintMask))),
@@ -1791,7 +1812,7 @@
kOneByteDataHintTag != 0 &&
kOneByteDataHintTag != kOneByteStringTag);
if_onebyte.If<HCompareNumericAndBranch>(
- Add<HBitwise>(
+ AddUncasted<HBitwise>(
Token::BIT_AND, xored_instance_types,
Add<HConstant>(static_cast<int32_t>(
kOneByteStringTag | kOneByteDataHintTag))),
@@ -1825,21 +1846,21 @@
if_createcons.Else();
{
// Compute union of instance types.
- HValue* ored_instance_types = Add<HBitwise>(
+ HValue* ored_instance_types = AddUncasted<HBitwise>(
Token::BIT_OR, left_instance_type, right_instance_type);
// Check if both strings have the same encoding and both are
// sequential.
IfBuilder if_sameencodingandsequential(this);
if_sameencodingandsequential.If<HCompareNumericAndBranch>(
- Add<HBitwise>(
+ AddUncasted<HBitwise>(
Token::BIT_AND, xored_instance_types,
Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
graph()->GetConstant0(), Token::EQ);
if_sameencodingandsequential.And();
STATIC_ASSERT(kSeqStringTag == 0);
if_sameencodingandsequential.If<HCompareNumericAndBranch>(
- Add<HBitwise>(
+ AddUncasted<HBitwise>(
Token::BIT_AND, ored_instance_types,
Add<HConstant>(static_cast<int32_t>(kStringRepresentationMask))),
graph()->GetConstant0(), Token::EQ);
@@ -1849,7 +1870,7 @@
IfBuilder if_onebyte(this);
STATIC_ASSERT(kOneByteStringTag != 0);
if_onebyte.If<HCompareNumericAndBranch>(
- Add<HBitwise>(
+ AddUncasted<HBitwise>(
Token::BIT_AND, ored_instance_types,
Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
graph()->GetConstant0(), Token::NE);
@@ -2172,11 +2193,11 @@
}
HConstant* elements_size_value = Add<HConstant>(elements_size);
- HValue* mul = Add<HMul>(capacity, elements_size_value);
+ HValue* mul = AddUncasted<HMul>(capacity, elements_size_value);
mul->ClearFlag(HValue::kCanOverflow);
HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
- HValue* total_size = Add<HAdd>(mul, header_size);
+ HValue* total_size = AddUncasted<HAdd>(mul, header_size);
total_size->ClearFlag(HValue::kCanOverflow);
return Add<HAllocate>(total_size, HType::JSArray(),
@@ -4504,7 +4525,7 @@
set_current_block(body_exit);
HValue* current_index = Pop();
- Push(Add<HAdd>(current_index, graph()->GetConstant1()));
+ Push(AddUncasted<HAdd>(current_index, graph()->GetConstant1()));
body_exit = current_block();
}
@@ -5934,12 +5955,7 @@
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- AddInstruction(instr);
- Push(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
- }
+ Push(BuildBinaryOperation(operation, left, right));
BuildStore(expr, prop, expr->id(),
expr->AssignmentId(), expr->IsUninitialized());
} else {
@@ -8575,7 +8591,7 @@
}
-HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
HValue* right) {
@@ -8584,12 +8600,22 @@
Handle<Type> result_type = expr->bounds().lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
- return HGraphBuilder::BuildBinaryOperation(expr->op(), left, right,
- left_type, right_type, result_type, fixed_right_arg);
+ HValue* result = HGraphBuilder::BuildBinaryOperation(
+ expr->op(), left, right, left_type, right_type,
+ result_type, fixed_right_arg);
+ // Add a simulate after instructions with observable side effects, and
+ // after phis, which are the result of BuildBinaryOperation when we
+ // inlined some complex subgraph.
+ if (result->HasObservableSideEffects() || result->IsPhi()) {
+ Push(result);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Drop(1);
+ }
+ return result;
}
-HInstruction* HGraphBuilder::BuildBinaryOperation(
+HValue* HGraphBuilder::BuildBinaryOperation(
Token::Value op,
HValue* left,
HValue* right,
@@ -8631,18 +8657,14 @@
(left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
// Validate type feedback for left argument.
if (left_type->Is(Type::String())) {
- IfBuilder if_isstring(this);
- if_isstring.If<HIsStringAndBranch>(left);
- if_isstring.Then();
- if_isstring.ElseDeopt("Expected string for LHS of binary operation");
+ left = BuildCheckString(
+ left, "Expected string for LHS of binary operation");
}
// Validate type feedback for right argument.
if (right_type->Is(Type::String())) {
- IfBuilder if_isstring(this);
- if_isstring.If<HIsStringAndBranch>(right);
- if_isstring.Then();
- if_isstring.ElseDeopt("Expected string for RHS of binary operation");
+ right = BuildCheckString(
+ right, "Expected string for RHS of binary operation");
}
// Convert left argument as necessary.
@@ -8654,7 +8676,7 @@
HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
Add<HPushArgument>(left);
Add<HPushArgument>(right);
- return NewUncasted<HInvokeFunction>(function, 2);
+ return AddUncasted<HInvokeFunction>(function, 2);
}
// Convert right argument as necessary.
@@ -8666,10 +8688,10 @@
HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
Add<HPushArgument>(left);
Add<HPushArgument>(right);
- return NewUncasted<HInvokeFunction>(function, 2);
+ return AddUncasted<HInvokeFunction>(function, 2);
}
- return NewUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_NONE);
+ return AddUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_NONE);
}
if (binop_stub) {
@@ -8690,51 +8712,66 @@
HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
Add<HPushArgument>(left);
Add<HPushArgument>(right);
- instr = NewUncasted<HInvokeFunction>(function, 2);
+ instr = AddUncasted<HInvokeFunction>(function, 2);
} else {
switch (op) {
case Token::ADD:
- instr = NewUncasted<HAdd>(left, right);
+ instr = AddUncasted<HAdd>(left, right);
break;
case Token::SUB:
- instr = NewUncasted<HSub>(left, right);
+ instr = AddUncasted<HSub>(left, right);
break;
case Token::MUL:
- instr = NewUncasted<HMul>(left, right);
+ instr = AddUncasted<HMul>(left, right);
break;
- case Token::MOD:
- instr = NewUncasted<HMod>(left, right, fixed_right_arg);
+ case Token::MOD: {
+ if (fixed_right_arg.has_value) {
+ if (right->IsConstant()) {
+ ASSERT_EQ(fixed_right_arg.value,
+ HConstant::cast(right)->Integer32Value());
+ } else {
+ HConstant* fixed_right = Add<HConstant>(
+ static_cast<int>(fixed_right_arg.value));
+ IfBuilder if_same(this);
+ if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
+ if_same.Then();
+ if_same.ElseDeopt("Unexpected RHS of binary operation");
+ right = fixed_right;
+ }
+ }
+ instr = AddUncasted<HMod>(left, right);
break;
+ }
case Token::DIV:
- instr = NewUncasted<HDiv>(left, right);
+ instr = AddUncasted<HDiv>(left, right);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
- instr = NewUncasted<HBitwise>(op, left, right);
+ instr = AddUncasted<HBitwise>(op, left, right);
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = NewUncasted<HRor>(operand, shift_amount);
+ instr = AddUncasted<HRor>(operand, shift_amount);
} else {
- instr = NewUncasted<HBitwise>(op, left, right);
+ instr = AddUncasted<HBitwise>(op, left, right);
}
break;
}
case Token::SAR:
- instr = NewUncasted<HSar>(left, right);
+ instr = AddUncasted<HSar>(left, right);
break;
case Token::SHR:
- instr = NewUncasted<HShr>(left, right);
+ instr = AddUncasted<HShr>(left, right);
if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
- instr = NewUncasted<HShl>(left, right);
+ instr = AddUncasted<HShl>(left, right);
break;
default:
UNREACHABLE();
@@ -8910,12 +8947,12 @@
SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(expr, left, right);
- if (FLAG_emit_opt_code_positions && instr->IsBinaryOperation()) {
- HBinaryOperation::cast(instr)->SetOperandPositions(
+ HValue* result = BuildBinaryOperation(expr, left, right);
+ if (FLAG_emit_opt_code_positions && result->IsBinaryOperation()) {
+ HBinaryOperation::cast(result)->SetOperandPositions(
zone(), expr->left()->position(), expr->right()->position());
}
- return ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnValue(result);
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 9aa9489..a117c55 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1256,6 +1256,7 @@
HValue* BuildCheckHeapObject(HValue* object);
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
+ HValue* BuildCheckString(HValue* object, const char* failure_reason);
HValue* BuildWrapReceiver(HValue* object, HValue* function);
// Building common constructs
@@ -1338,14 +1339,14 @@
HValue** operand,
HValue** shift_amount);
- HInstruction* BuildBinaryOperation(Token::Value op,
- HValue* left,
- HValue* right,
- Handle<Type> left_type,
- Handle<Type> right_type,
- Handle<Type> result_type,
- Maybe<int> fixed_right_arg,
- bool binop_stub = false);
+ HValue* BuildBinaryOperation(Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg,
+ bool binop_stub = false);
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
@@ -2274,9 +2275,9 @@
HInstruction* BuildStringCharCodeAt(HValue* string,
HValue* index);
- HInstruction* BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right);
+ HValue* BuildBinaryOperation(BinaryOperation* expr,
+ HValue* left,
+ HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
HInstruction* BuildLoadKeyedGeneric(HValue* object,
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index bacfe83..86c525d 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1575,8 +1575,7 @@
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- int depth = 1;
- expr->BuildConstantProperties(isolate(), &depth);
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
@@ -1586,7 +1585,7 @@
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- depth > 1 || Serializer::enabled() ||
+ expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1705,8 +1704,7 @@
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- int depth = 1;
- expr->BuildConstantElements(isolate(), &depth);
+ expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1733,7 +1731,7 @@
DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
- } else if (depth > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 654c04d..f316e85 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1399,36 +1399,6 @@
__ bind(&left_is_not_negative);
__ and_(left_reg, divisor - 1);
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
- Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmp(right_reg, Immediate(divisor));
- DeoptimizeIf(not_equal, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ neg(left_reg);
- __ and_(left_reg, divisor - 1);
- __ neg(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&left_is_not_negative);
- __ and_(left_reg, divisor - 1);
- __ bind(&done);
-
} else {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(eax));
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 5fcd0a2..d4c17ab 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1558,10 +1558,6 @@
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegisterAtStart(right),
- NULL);
return AssignEnvironment(DefineSameAsFirst(mod));
} else {
// The temporary operand is necessary to ensure that right is not
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 16c2a18..adbef17 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -451,22 +451,16 @@
}
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context);
+
+
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
- // : object passing the type check
- // (set by CheckPrototypes)
- // -- esp[32] : last argument
- // -- ...
- // -- esp[(argc + 7) * 4] : first argument
- // -- esp[(argc + 8) * 4] : receiver
- // -----------------------------------
-
+ int argc) {
typedef FunctionCallbackArguments FCA;
// Save calling context.
__ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi);
@@ -499,6 +493,110 @@
STATIC_ASSERT(kFastApiCallArguments == 7);
__ lea(eax, Operand(esp, 1 * kPointerSize));
+ GenerateFastApiCallBody(masm, optimization, argc, false);
+}
+
+
+// Generate call to api function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that should will be removed
+// when api call ICs are generated in hydrogen.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+
+ // Copy return value.
+ __ pop(scratch1);
+
+ // receiver
+ __ push(receiver);
+
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch1.is(arg));
+ ASSERT(!scratch2.is(arg));
+ ASSERT(!scratch3.is(arg));
+ __ push(arg);
+ }
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // context save
+ __ push(esi);
+
+ // Get the function and setup the context.
+ Handle<JSFunction> function = optimization.constant_function();
+ __ LoadHeapObject(scratch2, function);
+ __ mov(esi, FieldOperand(scratch2, JSFunction::kContextOffset));
+ // callee
+ __ push(scratch2);
+
+ Isolate* isolate = masm->isolate();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data(), isolate);
+ // Push data from ExecutableAccessorInfo.
+ if (isolate->heap()->InNewSpace(*call_data)) {
+ __ mov(scratch2, api_call_info);
+ __ mov(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
+ __ push(scratch3);
+ } else {
+ __ push(Immediate(call_data));
+ }
+ // return value
+ __ push(Immediate(isolate->factory()->undefined_value()));
+ // return value default
+ __ push(Immediate(isolate->factory()->undefined_value()));
+ // isolate
+ __ push(Immediate(reinterpret_cast<int>(isolate)));
+ // holder
+ __ push(receiver);
+
+ // store receiver address for GenerateFastApiCallBody
+ ASSERT(!scratch1.is(eax));
+ __ mov(eax, esp);
+
+ // return address
+ __ push(scratch1);
+
+ GenerateFastApiCallBody(masm, optimization, argc, true);
+}
+
+
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- esp[32] : last argument
+ // -- ...
+ // -- esp[(argc + 7) * 4] : first argument
+ // -- esp[(argc + 8) * 4] : receiver
+ //
+ // -- eax : receiver address
+ // -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+
// API function gets reference to the v8::Arguments. If CPU profiler
// is enabled wrapper function will be called and we need to pass
// address of the callback as additional parameter, always allocate
@@ -509,6 +607,8 @@
// it's not controlled by GC.
const int kApiStackSpace = 4;
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
@@ -543,40 +643,6 @@
}
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- const int stack_space = kFastApiCallArguments + argc + 1;
- const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
- // Copy return value.
- __ mov(scratch, Operand(esp, 0));
- // Assign stack space for the call arguments.
- __ sub(esp, Immediate(stack_space * kPointerSize));
- // Move the return address on top of the stack.
- __ mov(Operand(esp, 0), scratch);
- // Write holder to stack frame.
- __ mov(Operand(esp, kHolderIndex * kPointerSize), receiver);
- // Write receiver to stack frame.
- int index = stack_space;
- __ mov(Operand(esp, index-- * kPointerSize), receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ mov(Operand(esp, index-- * kPointerSize), values[i]);
- }
-
- GenerateFastApiCall(masm, optimization, argc, true);
-}
-
-
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -687,7 +753,7 @@
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
@@ -1370,7 +1436,8 @@
void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+ masm(), call_optimization, receiver(), scratch1(),
+ scratch2(), name(), 0, NULL);
}
@@ -2603,7 +2670,7 @@
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc, false);
+ GenerateFastApiCall(masm(), optimization, argc);
__ bind(&miss);
__ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
@@ -2920,7 +2987,8 @@
Register values[] = { value() };
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch1(), 1, values);
+ masm(), call_optimization, receiver(), scratch1(),
+ scratch2(), this->name(), 1, values);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/src/ic.cc b/src/ic.cc
index 9b30405..53c103a 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1237,15 +1237,12 @@
return isolate()->builtins()->LoadIC_Normal();
case CALLBACKS: {
// Use simple field loads for some well-known callback properties.
- int object_offset;
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<Map> map(receiver->map());
+ int object_offset;
if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
- PropertyIndex index =
- PropertyIndex::NewHeaderIndex(object_offset / kPointerSize);
- return compiler.CompileLoadField(
- receiver, receiver, name, index, Representation::Tagged());
+ return SimpleFieldLoad(object_offset / kPointerSize);
}
}
diff --git a/src/isolate.cc b/src/isolate.cc
index 700ca87..7250246 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -29,7 +29,6 @@
#include "v8.h"
-#include "allocation-inl.h"
#include "ast.h"
#include "bootstrapper.h"
#include "codegen.h"
@@ -131,189 +130,6 @@
}
-// Create a dummy thread that will wait forever on a semaphore. The only
-// purpose for this thread is to have some stack area to save essential data
-// into for use by a stacks only core dump (aka minidump).
-class PreallocatedMemoryThread: public Thread {
- public:
- char* data() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return data_;
- }
-
- unsigned length() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return length_;
- }
-
- // Stop the PreallocatedMemoryThread and release its resources.
- void StopThread() {
- keep_running_ = false;
- wait_for_ever_semaphore_->Signal();
-
- // Wait for the thread to terminate.
- Join();
-
- if (data_ready_semaphore_ != NULL) {
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
-
- delete wait_for_ever_semaphore_;
- wait_for_ever_semaphore_ = NULL;
- }
-
- protected:
- // When the thread starts running it will allocate a fixed number of bytes
- // on the stack and publish the location of this memory for others to use.
- void Run() {
- EmbeddedVector<char, 15 * 1024> local_buffer;
-
- // Initialize the buffer with a known good value.
- OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
- local_buffer.length());
-
- // Publish the local buffer and signal its availability.
- data_ = local_buffer.start();
- length_ = local_buffer.length();
- data_ready_semaphore_->Signal();
-
- while (keep_running_) {
- // This thread will wait here until the end of time.
- wait_for_ever_semaphore_->Wait();
- }
-
- // Make sure we access the buffer after the wait to remove all possibility
- // of it being optimized away.
- OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
- local_buffer.length());
- }
-
-
- private:
- PreallocatedMemoryThread()
- : Thread("v8:PreallocMem"),
- keep_running_(true),
- wait_for_ever_semaphore_(new Semaphore(0)),
- data_ready_semaphore_(new Semaphore(0)),
- data_(NULL),
- length_(0) {
- }
-
- // Used to make sure that the thread keeps looping even for spurious wakeups.
- bool keep_running_;
-
- // This semaphore is used by the PreallocatedMemoryThread to wait for ever.
- Semaphore* wait_for_ever_semaphore_;
- // Semaphore to signal that the data has been initialized.
- Semaphore* data_ready_semaphore_;
-
- // Location and size of the preallocated memory block.
- char* data_;
- unsigned length_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
-};
-
-
-void Isolate::PreallocatedMemoryThreadStart() {
- if (preallocated_memory_thread_ != NULL) return;
- preallocated_memory_thread_ = new PreallocatedMemoryThread();
- preallocated_memory_thread_->Start();
-}
-
-
-void Isolate::PreallocatedMemoryThreadStop() {
- if (preallocated_memory_thread_ == NULL) return;
- preallocated_memory_thread_->StopThread();
- // Done with the thread entirely.
- delete preallocated_memory_thread_;
- preallocated_memory_thread_ = NULL;
-}
-
-
-void Isolate::PreallocatedStorageInit(size_t size) {
- ASSERT(free_list_.next_ == &free_list_);
- ASSERT(free_list_.previous_ == &free_list_);
- PreallocatedStorage* free_chunk =
- reinterpret_cast<PreallocatedStorage*>(new char[size]);
- free_list_.next_ = free_list_.previous_ = free_chunk;
- free_chunk->next_ = free_chunk->previous_ = &free_list_;
- free_chunk->size_ = size - sizeof(PreallocatedStorage);
- preallocated_storage_preallocated_ = true;
-}
-
-
-void* Isolate::PreallocatedStorageNew(size_t size) {
- if (!preallocated_storage_preallocated_) {
- return FreeStoreAllocationPolicy().New(size);
- }
- ASSERT(free_list_.next_ != &free_list_);
- ASSERT(free_list_.previous_ != &free_list_);
-
- size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
- // Search for exact fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ == size) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Search for first fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- PreallocatedStorage* left_over =
- reinterpret_cast<PreallocatedStorage*>(
- reinterpret_cast<char*>(storage + 1) + size);
- left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
- ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
- storage->size_);
- storage->size_ = size;
- left_over->LinkTo(&free_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Allocation failure.
- ASSERT(false);
- return NULL;
-}
-
-
-// We don't attempt to coalesce.
-void Isolate::PreallocatedStorageDelete(void* p) {
- if (p == NULL) {
- return;
- }
- if (!preallocated_storage_preallocated_) {
- FreeStoreAllocationPolicy::Delete(p);
- return;
- }
- PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
- ASSERT(storage->next_->previous_ == storage);
- ASSERT(storage->previous_->next_ == storage);
- storage->Unlink();
- storage->LinkTo(&free_list_);
-}
-
Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
@@ -837,24 +653,12 @@
}
-void Isolate::PrintStack() {
- PrintStack(stdout);
-}
-
-
void Isolate::PrintStack(FILE* out) {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
-
- StringAllocator* allocator;
- if (preallocated_message_space_ == NULL) {
- allocator = new HeapStringAllocator();
- } else {
- allocator = preallocated_message_space_;
- }
-
StringStream::ClearMentionedObjectCache(this);
- StringStream accumulator(allocator);
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
accumulator.OutputToFile(out);
@@ -862,10 +666,6 @@
accumulator.Log(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
- if (preallocated_message_space_ == NULL) {
- // Remove the HeapStringAllocator created above.
- delete allocator;
- }
} else if (stack_trace_nesting_level_ == 1) {
stack_trace_nesting_level_++;
OS::PrintError(
@@ -1719,13 +1519,11 @@
Isolate::Isolate()
- : state_(UNINITIALIZED),
- embedder_data_(NULL),
+ : embedder_data_(),
+ state_(UNINITIALIZED),
entry_stack_(NULL),
stack_trace_nesting_level_(0),
incomplete_message_(NULL),
- preallocated_memory_thread_(NULL),
- preallocated_message_space_(NULL),
bootstrapper_(NULL),
runtime_profiler_(NULL),
compilation_cache_(NULL),
@@ -1747,9 +1545,6 @@
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
runtime_zone_(this),
- in_use_list_(0),
- free_list_(0),
- preallocated_storage_preallocated_(false),
inner_pointer_to_code_cache_(NULL),
write_iterator_(NULL),
global_handles_(NULL),
@@ -1902,11 +1697,6 @@
builtins_.TearDown();
bootstrapper_->TearDown();
- // Remove the external reference to the preallocated stack memory.
- delete preallocated_message_space_;
- preallocated_message_space_ = NULL;
- PreallocatedMemoryThreadStop();
-
if (runtime_profiler_ != NULL) {
runtime_profiler_->TearDown();
delete runtime_profiler_;
@@ -2248,17 +2038,6 @@
}
}
- // Only preallocate on the first initialization.
- if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
- // Start the thread which will set aside some memory.
- PreallocatedMemoryThreadStart();
- preallocated_message_space_ =
- new NoAllocationStringAllocator(
- preallocated_memory_thread_->data(),
- preallocated_memory_thread_->length());
- PreallocatedStorageInit(preallocated_memory_thread_->length() / 4);
- }
-
if (FLAG_preemption) {
v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
v8::Locker::StartPreemption(reinterpret_cast<v8::Isolate*>(this), 100);
diff --git a/src/isolate.h b/src/isolate.h
index 1d45de9..ed568b7 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -76,7 +76,6 @@
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
-class PreallocatedMemoryThread;
class RandomNumberGenerator;
class RegExpStack;
class SaveContext;
@@ -331,7 +330,7 @@
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
+typedef List<HeapObject*> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
/* SerializerDeserializer state. */ \
@@ -717,10 +716,8 @@
}
void PrintCurrentStackTrace(FILE* out);
- void PrintStackTrace(FILE* out, char* thread_data);
void PrintStack(StringStream* accumulator);
void PrintStack(FILE* out);
- void PrintStack();
Handle<String> StackTraceString();
NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
Object* object,
@@ -978,10 +975,6 @@
return &interp_canonicalize_mapping_;
}
- void* PreallocatedStorageNew(size_t size);
- void PreallocatedStorageDelete(void* p);
- void PreallocatedStorageInit(size_t size);
-
inline bool IsCodePreAgingActive();
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1050,8 +1043,14 @@
thread_local_top_.current_vm_state_ = state;
}
- void SetData(void* data) { embedder_data_ = data; }
- void* GetData() { return embedder_data_; }
+ void SetData(uint32_t slot, void* data) {
+ ASSERT(slot < Internals::kNumIsolateDataSlots);
+ embedder_data_[slot] = data;
+ }
+ void* GetData(uint32_t slot) {
+ ASSERT(slot < Internals::kNumIsolateDataSlots);
+ return embedder_data_[slot];
+ }
LookupResult* top_lookup_result() {
return thread_local_top_.top_lookup_result_;
@@ -1164,9 +1163,9 @@
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8.h) constants. This is also
// verified in Isolate::Init() using runtime checks.
- State state_; // Will be padded to kApiPointerSize.
- void* embedder_data_;
+ void* embedder_data_[Internals::kNumIsolateDataSlots];
Heap heap_;
+ State state_; // Will be padded to kApiPointerSize.
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
@@ -1242,11 +1241,8 @@
// at the same time, this should be prevented using external locking.
void Exit();
- void PreallocatedMemoryThreadStart();
- void PreallocatedMemoryThreadStop();
void InitializeThreadLocal();
- void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
void MarkCompactPrologue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void MarkCompactEpilogue(bool is_compacting,
@@ -1266,10 +1262,7 @@
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
- // The preallocated memory thread singleton.
- PreallocatedMemoryThread* preallocated_memory_thread_;
Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
- NoAllocationStringAllocator* preallocated_message_space_;
Bootstrapper* bootstrapper_;
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
@@ -1296,9 +1289,6 @@
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
Zone runtime_zone_;
- PreallocatedStorage in_use_list_;
- PreallocatedStorage free_list_;
- bool preallocated_storage_preallocated_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
ConsStringIteratorOp* write_iterator_;
GlobalHandles* global_handles_;
diff --git a/src/lithium.cc b/src/lithium.cc
index ee8ea3e..414d5f4 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -237,7 +237,8 @@
if (index >= 0) {
// Local or spill slot. Skip the frame pointer, function, and
// context in the fixed part of the frame.
- return -(index + 3) * kPointerSize;
+ return -(index + 1) * kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeFromFp;
} else {
// Incoming parameter. Skip the return address.
return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
diff --git a/src/math.js b/src/math.js
index e1798fa..2df0ec2 100644
--- a/src/math.js
+++ b/src/math.js
@@ -217,16 +217,19 @@
// Also define the initialization function that populates the lookup table
// and then wires up the function definitions.
function SetupTrigonometricFunctions() {
- // TODO(yangguo): The following table size has been chosen to satisfy
- // Sunspider's brittle result verification. Reconsider relevance.
- var samples = 4489;
- var pi = 3.1415926535897932;
- var pi_half = pi / 2;
- var inverse_pi_half = 2 / pi;
- var two_pi = 2 * pi;
- var four_pi = 4 * pi;
- var interval = pi_half / samples;
- var inverse_interval = samples / pi_half;
+ var samples = 1800; // Table size. Do not change arbitrarily.
+ var inverse_pi_half = 0.636619772367581343; // 2 / pi
+ var inverse_pi_half_s_26 = 9.48637384723993156e-9; // 2 / pi / (2^26)
+ var s_26 = 1 << 26;
+ var two_step_threshold = 1 << 27;
+ var index_convert = 1145.915590261646418; // samples / (pi / 2)
+ // pi / 2 rounded up
+ var pi_half = 1.570796326794896780; // 0x192d4454fb21f93f
+ // We use two parts for pi/2 to emulate a higher precision.
+ // pi_half_1 only has 26 significant bits for mantissa.
+ // Note that pi_half > pi_half_1 + pi_half_2
+ var pi_half_1 = 1.570796325802803040; // 0x00000054fb21f93f
+ var pi_half_2 = 9.920935796805404252e-10; // 0x3326a611460b113e
var table_sin;
var table_cos_interval;
@@ -234,6 +237,9 @@
// 1) Multiplication takes care of to-number conversion.
// 2) Reduce x to the first quadrant [0, pi/2].
// Conveniently enough, in case of +/-Infinity, we get NaN.
+ // Note that we try to use only 26 instead of 52 significant bits for
+ // mantissa to avoid rounding errors when multiplying. For very large
+ // input we therefore have additional steps.
// 3) Replace x by (pi/2-x) if x was in the 2nd or 4th quadrant.
// 4) Do a table lookup for the closest samples to the left and right of x.
// 5) Find the derivatives at those sampling points by table lookup:
@@ -241,8 +247,30 @@
// 6) Use cubic spline interpolation to approximate sin(x).
// 7) Negate the result if x was in the 3rd or 4th quadrant.
// 8) Get rid of -0 by adding 0.
- var Interpolation = function(x) {
- var double_index = x * inverse_interval;
+ var Interpolation = function(x, phase) {
+ if (x < 0 || x > pi_half) {
+ var multiple;
+ while (x < -two_step_threshold || x > two_step_threshold) {
+ // Let's assume this loop does not terminate.
+ // All numbers x in each loop forms a set S.
+ // (1) abs(x) > 2^27 for all x in S.
+ // (2) abs(multiple) != 0 since (2^27 * inverse_pi_half_s26) > 1
+ // (3) multiple is rounded down in 2^26 steps, so the rounding error is
+ // at most max(ulp, 2^26).
+ // (4) so for x > 2^27, we subtract at most (1+pi/4)x and at least
+ // (1-pi/4)x
+ // (5) The subtraction results in x' so that abs(x') <= abs(x)*pi/4.
+ // Note that this difference cannot be simply rounded off.
+ // Set S cannot exist since (5) violates (1). Loop must terminate.
+ multiple = MathFloor(x * inverse_pi_half_s_26) * s_26;
+ x = x - multiple * pi_half_1 - multiple * pi_half_2;
+ }
+ multiple = MathFloor(x * inverse_pi_half);
+ x = x - multiple * pi_half_1 - multiple * pi_half_2;
+ phase += multiple;
+ }
+ var double_index = x * index_convert;
+ if (phase & 1) double_index = samples - double_index;
var index = double_index | 0;
var t1 = double_index - index;
var t2 = 1 - t1;
@@ -251,26 +279,20 @@
var dy = y2 - y1;
return (t2 * y1 + t1 * y2 +
t1 * t2 * ((table_cos_interval[index] - dy) * t2 +
- (dy - table_cos_interval[index + 1]) * t1));
+ (dy - table_cos_interval[index + 1]) * t1))
+ * (1 - (phase & 2)) + 0;
}
var MathSinInterpolation = function(x) {
- // This is to make Sunspider's result verification happy.
- if (x > four_pi) x -= four_pi;
- var multiple = MathFloor(x * inverse_pi_half);
- if (%_IsMinusZero(multiple)) return multiple;
- x = (multiple & 1) * pi_half +
- (1 - ((multiple & 1) << 1)) * (x - multiple * pi_half);
- return Interpolation(x) * (1 - (multiple & 2)) + 0;
+ x = x * 1; // Convert to number and deal with -0.
+ if (%_IsMinusZero(x)) return x;
+ return Interpolation(x, 0);
}
- // Cosine is sine with a phase offset of pi/2.
+ // Cosine is sine with a phase offset.
var MathCosInterpolation = function(x) {
- var multiple = MathFloor(x * inverse_pi_half);
- var phase = multiple + 1;
- x = (phase & 1) * pi_half +
- (1 - ((phase & 1) << 1)) * (x - multiple * pi_half);
- return Interpolation(x) * (1 - (phase & 2)) + 0;
+ x = MathAbs(x); // Convert to number and get rid of -0.
+ return Interpolation(x, 1);
};
%SetInlineBuiltinFlag(Interpolation);
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 2fe081e..85588e8 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -122,7 +122,7 @@
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
+ __ SmiTst(a2, t0);
__ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
@@ -152,7 +152,7 @@
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
+ __ SmiTst(a2, t0);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
@@ -879,7 +879,7 @@
// Perform prologue operations usually performed by the young code stub.
__ Push(ra, fp, cp, a1);
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
__ Addu(a0, a0, Operand((kNoCodeAgeSequenceLength) * Assembler::kInstrSize));
@@ -1208,11 +1208,13 @@
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
@@ -1371,7 +1373,8 @@
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Addu(fp, sp, Operand(3 * kPointerSize));
+ __ Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1381,7 +1384,8 @@
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+ __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
__ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
@@ -1479,7 +1483,9 @@
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(a2, fp, Operand(t2));
- __ Addu(a2, a2, Operand(-4 * kPointerSize)); // Adjust for frame.
+ // Adjust for frame.
+ __ Subu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index e28eb7a..47afeb4 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -2969,7 +2969,7 @@
// Check that the RegExp has been compiled (data contains a fixed array).
__ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- __ And(t0, regexp_data, Operand(kSmiTagMask));
+ __ SmiTst(regexp_data, t0);
__ Check(nz,
kUnexpectedTypeForRegExpDataFixedArrayExpected,
t0,
@@ -6164,7 +6164,7 @@
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
@@ -6253,7 +6253,7 @@
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 904a37d..221a0e1 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -618,7 +618,8 @@
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
patcher.masm()->Push(ra, fp, cp, a1);
patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
+ patcher.masm()->Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
initialized = true;
}
return byte_sequence;
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 437bf3a..55951b5 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -154,7 +154,8 @@
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 9d93aaf..6ef871f 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1645,8 +1645,7 @@
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- int depth = 1;
- expr->BuildConstantProperties(isolate(), &depth);
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
@@ -1661,7 +1660,7 @@
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- depth > 1 || Serializer::enabled() ||
+ expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
@@ -1780,8 +1779,7 @@
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- int depth = 1;
- expr->BuildConstantElements(isolate(), &depth);
+ expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
@@ -1808,7 +1806,7 @@
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
1, a1, a2);
- } else if (depth > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ Push(a3, a2, a1);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
@@ -2916,7 +2914,7 @@
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(t0, v0, Operand(kSmiTagMask));
+ __ SmiTst(v0, t0);
Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2937,7 +2935,7 @@
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
+ __ NonNegativeSmiTst(v0, at);
Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3530,9 +3528,9 @@
__ Pop(index, value);
if (FLAG_debug_code) {
- __ And(at, value, Operand(kSmiTagMask));
+ __ SmiTst(value, at);
__ ThrowIf(ne, kNonSmiValue, at, Operand(zero_reg));
- __ And(at, index, Operand(kSmiTagMask));
+ __ SmiTst(index, at);
__ ThrowIf(ne, kNonSmiIndex, at, Operand(zero_reg));
__ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
@@ -3567,9 +3565,9 @@
__ Pop(index, value);
if (FLAG_debug_code) {
- __ And(at, value, Operand(kSmiTagMask));
+ __ SmiTst(value, at);
__ ThrowIf(ne, kNonSmiValue, at, Operand(zero_reg));
- __ And(at, index, Operand(kSmiTagMask));
+ __ SmiTst(index, at);
__ ThrowIf(ne, kNonSmiIndex, at, Operand(zero_reg));
__ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 7dc341f..bc5d62a 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -257,7 +257,7 @@
__ MultiPush(cp.bit() | fp.bit() | ra.bit());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -309,7 +309,7 @@
ASSERT(info()->IsStub());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ Call(t9);
}
} else {
@@ -1091,35 +1091,6 @@
__ bind(&left_is_not_negative);
__ And(result_reg, left_reg, divisor - 1);
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
- const Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
- &left_is_not_negative, ge, left_reg, Operand(zero_reg));
- __ subu(result_reg, zero_reg, left_reg);
- __ And(result_reg, result_reg, divisor - 1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result_reg, zero_reg, result_reg);
- }
-
- __ bind(&left_is_not_negative);
- __ And(result_reg, left_reg, divisor - 1);
- __ bind(&done);
-
} else {
const Register scratch = scratch0();
const Register left_reg = ToRegister(instr->left());
@@ -1733,7 +1704,7 @@
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
- __ And(at, object, Operand(kSmiTagMask));
+ __ SmiTst(object, at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
@@ -2144,7 +2115,7 @@
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
- __ And(at, reg, Operand(kSmiTagMask));
+ __ SmiTst(reg, at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
@@ -3237,7 +3208,7 @@
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ And(scratch, result, Operand(kSmiTagMask));
+ __ SmiTst(result, scratch);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
@@ -3386,7 +3357,7 @@
__ Branch(&global_object, eq, receiver, Operand(scratch));
// Deoptimize if the receiver is not a JS object.
- __ And(scratch, receiver, Operand(kSmiTagMask));
+ __ SmiTst(receiver, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
@@ -4164,7 +4135,7 @@
if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ And(scratch, value, Operand(kSmiTagMask));
+ __ SmiTst(value, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
@@ -5159,7 +5130,7 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input), at);
DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}
@@ -5167,7 +5138,7 @@
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->IsHeapObject()) {
LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input), at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
}
@@ -5240,7 +5211,7 @@
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
- __ And(at, scratch0(), Operand(kSmiTagMask));
+ __ SmiTst(scratch0(), at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index fb81627..abdc3e6 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1460,10 +1460,6 @@
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- return AssignEnvironment(DefineAsRegister(mod));
} else {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right),
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 930afcb..cb336f3 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -4516,7 +4516,7 @@
Push(ra, fp, cp);
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
- Addu(fp, sp, Operand(2 * kPointerSize));
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
} else {
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
@@ -4539,7 +4539,7 @@
Push(ra, fp, cp, a1);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
// Adjust fp to point to caller's fp.
- Addu(fp, sp, Operand(2 * kPointerSize));
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
}
@@ -4554,7 +4554,9 @@
sw(cp, MemOperand(sp, 2 * kPointerSize));
sw(t8, MemOperand(sp, 1 * kPointerSize));
sw(t9, MemOperand(sp, 0 * kPointerSize));
- addiu(fp, sp, 3 * kPointerSize);
+ // Adjust FP to point to saved FP.
+ Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -4843,7 +4845,7 @@
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
+ SmiTst(object, t0);
Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -4857,7 +4859,7 @@
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
+ SmiTst(object, t0);
Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -5057,7 +5059,7 @@
Register scratch,
uint32_t encoding_mask) {
Label is_object;
- And(at, string, Operand(kSmiTagMask));
+ SmiTst(string, at);
ThrowIf(eq, kNonObject, at, Operand(zero_reg));
lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
@@ -5071,9 +5073,7 @@
// string length without using a temp register, it is restored at the end of
// this function.
Label index_tag_ok, index_tag_bad;
- // On ARM TrySmiTag is used here.
- AdduAndCheckForOverflow(index, index, index, scratch);
- BranchOnOverflow(&index_tag_bad, scratch);
+ TrySmiTag(index, scratch, &index_tag_bad);
Branch(&index_tag_ok);
bind(&index_tag_bad);
Throw(kIndexIsTooLarge);
@@ -5082,8 +5082,8 @@
lw(at, FieldMemOperand(string, String::kLengthOffset));
ThrowIf(ge, kIndexIsTooLarge, index, Operand(at));
- li(at, Operand(Smi::FromInt(0)));
- ThrowIf(lt, kIndexIsNegative, index, Operand(at));
+ ASSERT(Smi::FromInt(0) == 0);
+ ThrowIf(lt, kIndexIsNegative, index, Operand(zero_reg));
SmiUntag(index, index);
}
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index d3fdbd6..e9d9f1c 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1371,6 +1371,21 @@
Addu(dst, src, src);
}
+  // Try to convert int32 to smi. If the value is too large, preserve
+ // the original value and jump to not_a_smi. Destroys scratch and
+ // sets flags.
+ void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
+ TrySmiTag(reg, reg, scratch, not_a_smi);
+ }
+ void TrySmiTag(Register dst,
+ Register src,
+ Register scratch,
+ Label* not_a_smi) {
+ SmiTagCheckOverflow(at, src, scratch);
+ BranchOnOverflow(not_a_smi, scratch);
+ mov(dst, at);
+ }
+
void SmiUntag(Register reg) {
sra(reg, reg, kSmiTagSize);
}
@@ -1379,6 +1394,14 @@
sra(dst, src, kSmiTagSize);
}
+ // Test if the register contains a smi.
+ inline void SmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask));
+ }
+ inline void NonNegativeSmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
+ }
+
// Untag the source value into destination and jump if source is a smi.
// Souce and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 921a47c..9b31b4d 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -2316,7 +2316,7 @@
// If the argument is a smi, just return.
STATIC_ASSERT(kSmiTag == 0);
- __ And(t0, v0, Operand(kSmiTagMask));
+ __ SmiTst(v0, t0);
__ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
diff --git a/src/objects.cc b/src/objects.cc
index 627d371..c874ee3 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2784,7 +2784,6 @@
Handle<Map> new_map = split_map;
for (; descriptor < descriptors; descriptor++) {
new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors);
- new_map->set_migration_target(true);
}
new_map->set_owns_descriptors(true);
@@ -3875,6 +3874,7 @@
Handle<Map> original_map(object->map());
GeneralizeFieldRepresentation(
object, 0, Representation::None(), ALLOW_AS_CONSTANT);
+ object->map()->set_migration_target(true);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, object->map());
}
diff --git a/src/runtime.cc b/src/runtime.cc
index b5a1081..f546629 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -700,7 +700,7 @@
isolate, phantom_array_buffer->byte_length());
isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<intptr_t>(allocated_length));
+ -static_cast<int64_t>(allocated_length));
CHECK(V8::ArrayBufferAllocator() != NULL);
V8::ArrayBufferAllocator()->Free(
phantom_array_buffer->backing_store(),
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 04c0044..03e69bf 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -32,8 +32,6 @@
#include "scopeinfo.h"
#include "scopes.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
diff --git a/src/scopes.cc b/src/scopes.cc
index 137ab68..fefc696 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -35,8 +35,6 @@
#include "messages.h"
#include "scopeinfo.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 45b675f..e2d15f5 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -30,8 +30,6 @@
#include "factory.h"
#include "string-stream.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -299,8 +297,7 @@
void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->set_string_stream_current_security_token(NULL);
if (isolate->string_stream_debug_object_cache() == NULL) {
- isolate->set_string_stream_debug_object_cache(
- new List<HeapObject*, PreallocatedStorageAllocationPolicy>(0));
+ isolate->set_string_stream_debug_object_cache(new DebugObjectCache(0));
}
isolate->string_stream_debug_object_cache()->Clear();
}
diff --git a/src/version.cc b/src/version.cc
index 08606aa..8ee0430 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 23
-#define BUILD_NUMBER 8
+#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 8947e2f..71b5468 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1596,8 +1596,7 @@
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- int depth = 1;
- expr->BuildConstantProperties(isolate(), &depth);
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
@@ -1607,7 +1606,7 @@
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- depth > 1 || Serializer::enabled() ||
+ expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1726,8 +1725,7 @@
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- int depth = 1;
- expr->BuildConstantElements(isolate(), &depth);
+ expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1754,7 +1752,7 @@
DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
- } else if (depth > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index ba8ccc5..0c95745 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1013,36 +1013,6 @@
__ bind(&left_is_not_negative);
__ andl(left_reg, Immediate(divisor - 1));
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
- Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmpl(right_reg, Immediate(divisor));
- DeoptimizeIf(not_equal, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ testl(left_reg, left_reg);
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ negl(left_reg);
- __ andl(left_reg, Immediate(divisor - 1));
- __ negl(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&left_is_not_negative);
- __ andl(left_reg, Immediate(divisor - 1));
- __ bind(&done);
-
} else {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(rax));
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 0b2bf82..2a73472 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1469,11 +1469,6 @@
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegisterAtStart(right),
- NULL);
- return AssignEnvironment(DefineSameAsFirst(mod));
} else {
// The temporary operand is necessary to ensure that right is not
// allocated into edx.
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 4c61f24..963bc12 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -436,21 +436,16 @@
}
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context);
+
+
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl.
- // : object passing the type check
- // (set by CheckPrototypes)
- // -- rsp[64] : last argument
- // -- ...
- // -- rsp[(argc + 7) * 8] : first argument
- // -- rsp[(argc + 8) * 8] : receiver
- // -----------------------------------
+ int argc) {
typedef FunctionCallbackArguments FCA;
StackArgumentsAccessor args(rsp, argc + kFastApiCallArguments);
@@ -485,8 +480,121 @@
// Prepare arguments.
STATIC_ASSERT(kFastApiCallArguments == 7);
- __ lea(rbx, Operand(rsp, 1 * kPointerSize));
+ __ lea(rax, Operand(rsp, 1 * kPointerSize));
+ GenerateFastApiCallBody(masm, optimization, argc, false);
+}
+
+
+// Generate call to api function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that should be removed
+// when api call ICs are generated in hydrogen.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+
+ // Copy return value.
+ __ pop(scratch1);
+
+ // receiver
+ __ push(receiver);
+
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch1.is(arg));
+ ASSERT(!scratch2.is(arg));
+ ASSERT(!scratch3.is(arg));
+ __ push(arg);
+ }
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // context save
+ __ push(rsi);
+
+ // Get the function and setup the context.
+ Handle<JSFunction> function = optimization.constant_function();
+ __ Move(scratch2, function);
+ __ push(scratch2);
+
+ Isolate* isolate = masm->isolate();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data(), isolate);
+ // Push data from ExecutableAccessorInfo.
+ bool call_data_undefined = false;
+ if (isolate->heap()->InNewSpace(*call_data)) {
+ __ Move(scratch2, api_call_info);
+ __ movq(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
+ } else if (call_data->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ Move(scratch3, call_data);
+ }
+ // call data
+ __ push(scratch3);
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ push(scratch3);
+ // return value default
+ __ push(scratch3);
+ // isolate
+ __ Move(scratch3,
+ ExternalReference::isolate_address(masm->isolate()));
+ __ push(scratch3);
+ // holder
+ __ push(receiver);
+
+ ASSERT(!scratch1.is(rax));
+ // store receiver address for GenerateFastApiCallBody
+ __ movq(rax, rsp);
+
+ // return address
+ __ push(scratch1);
+
+ GenerateFastApiCallBody(masm, optimization, argc, true);
+}
+
+
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- rsp[64] : last argument
+ // -- ...
+ // -- rsp[(argc + 7) * 8] : first argument
+ // -- rsp[(argc + 8) * 8] : receiver
+ //
+ // rax : receiver address
+ // -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
@@ -496,9 +604,9 @@
__ PrepareCallApiFunction(kApiStackSpace);
- __ movq(StackSpaceOperand(0), rbx); // FunctionCallbackInfo::implicit_args_.
- __ addq(rbx, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
- __ movq(StackSpaceOperand(1), rbx); // FunctionCallbackInfo::values_.
+ __ movq(StackSpaceOperand(0), rax); // FunctionCallbackInfo::implicit_args_.
+ __ addq(rax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ __ movq(StackSpaceOperand(1), rax); // FunctionCallbackInfo::values_.
__ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
// FunctionCallbackInfo::is_construct_call_.
__ Set(StackSpaceOperand(3), 0);
@@ -532,39 +640,6 @@
}
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- const int fast_api_call_argc = argc + kFastApiCallArguments;
- StackArgumentsAccessor args(rsp, fast_api_call_argc);
- // argc + 1 is the argument number before FastApiCall arguments, 1 ~ receiver
- const int kHolderIndex = argc + 1 +
- kFastApiCallArguments - 1 - FunctionCallbackArguments::kHolderIndex;
- __ movq(scratch, StackOperandForReturnAddress(0));
- // Assign stack space for the call arguments and receiver.
- __ subq(rsp, Immediate((fast_api_call_argc + 1) * kPointerSize));
- __ movq(StackOperandForReturnAddress(0), scratch);
- // Write holder to stack frame.
- __ movq(args.GetArgumentOperand(kHolderIndex), receiver);
- __ movq(args.GetReceiverOperand(), receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ movq(args.GetArgumentOperand(i + 1), values[i]);
- }
-
- GenerateFastApiCall(masm, optimization, argc, true);
-}
-
-
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -675,7 +750,7 @@
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -1294,7 +1369,8 @@
void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+ masm(), call_optimization, receiver(),
+ scratch1(), scratch2(), name(), 0, NULL);
}
@@ -2501,7 +2577,7 @@
StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize));
__ movq(StackOperandForReturnAddress(0), rax);
- GenerateFastApiCall(masm(), optimization, argc, false);
+ GenerateFastApiCall(masm(), optimization, argc);
__ bind(&miss);
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
@@ -2821,7 +2897,8 @@
Register values[] = { value() };
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 1, values);
+ masm(), call_optimization, receiver(), scratch1(),
+ scratch2(), this->name(), 1, values);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/test/benchmarks/testcfg.py b/test/benchmarks/testcfg.py
index b15553a..c94a35f 100644
--- a/test/benchmarks/testcfg.py
+++ b/test/benchmarks/testcfg.py
@@ -64,7 +64,7 @@
"octane/crypto",
"octane/deltablue",
"octane/earley-boyer",
- "octane/gbemu",
+ "octane/gbemu-part1",
"octane/mandreel",
"octane/navier-stokes",
"octane/pdfjs",
@@ -72,6 +72,8 @@
"octane/regexp",
"octane/richards",
"octane/splay",
+ "octane/typescript",
+ "octane/zlib",
"sunspider/3d-cube",
"sunspider/3d-morph",
@@ -111,6 +113,14 @@
elif testcase.path.startswith("octane"):
result.append(os.path.join(self.testroot, "octane/base.js"))
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
+ if testcase.path.startswith("octane/gbemu"):
+ result.append(os.path.join(self.testroot, "octane/gbemu-part2.js"))
+ elif testcase.path.startswith("octane/typescript"):
+ result.append(os.path.join(self.testroot,
+ "octane/typescript-compiler.js"))
+ result.append(os.path.join(self.testroot, "octane/typescript-input.js"))
+ elif testcase.path.startswith("octane/zlib"):
+ result.append(os.path.join(self.testroot, "octane/zlib-data.js"))
result += ["-e", "BenchmarkSuite.RunSuites({});"]
elif testcase.path.startswith("sunspider"):
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
@@ -158,8 +168,8 @@
self._DownloadIfNecessary(
("http://svn.webkit.org/repository/webkit/trunk/PerformanceTests/"
- "SunSpider/tests/sunspider-1.0/"),
- "153700", "sunspider")
+ "SunSpider/tests/sunspider-1.0.2/"),
+ "159499", "sunspider")
self._DownloadIfNecessary(
("http://kraken-mirror.googlecode.com/svn/trunk/kraken/tests/"
@@ -168,7 +178,7 @@
self._DownloadIfNecessary(
"http://octane-benchmark.googlecode.com/svn/trunk/",
- "22", "octane")
+ "26", "octane")
os.chdir(old_cwd)
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index d73399f..a596add 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -30,9 +30,6 @@
# All tests prefixed with 'Bug' are expected to fail.
'test-api/Bug*': [FAIL],
- # TODO(mvstanton): reenable when I figure out why it fails on no-sse2 builds
- 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [SKIP],
-
##############################################################################
# BUG(382): Weird test. Can't guarantee that it never times out.
'test-api/ApplyInterruption': [PASS, TIMEOUT],
@@ -72,7 +69,7 @@
'test-cpu-profiler/CollectCpuProfile': [PASS, FAIL],
# BUG(3005).
- 'test-alloc/CodeRange': [PASS, FLAKY],
+ 'test-alloc/CodeRange': [PASS, FAIL],
}], # 'system == windows'
##############################################################################
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 6a75cd7..2df0a89 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -4263,7 +4263,7 @@
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(5 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
// Execute a script that causes out of memory.
LocalContext context;
@@ -4304,7 +4304,7 @@
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(5 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
@@ -4333,7 +4333,7 @@
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
// Execute a script that causes out of memory.
v8::V8::IgnoreOutOfMemoryException();
@@ -13748,20 +13748,17 @@
}
-static int64_t cast(intptr_t x) { return static_cast<int64_t>(x); }
-
-
THREADED_TEST(ExternalAllocatedMemory) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer(isolate);
v8::Local<Context> env(Context::New(isolate));
CHECK(!env.IsEmpty());
- const intptr_t kSize = 1024*1024;
- int64_t baseline = cast(isolate->AdjustAmountOfExternalAllocatedMemory(0));
- CHECK_EQ(baseline + cast(kSize),
- cast(isolate->AdjustAmountOfExternalAllocatedMemory(kSize)));
+ const int64_t kSize = 1024*1024;
+ int64_t baseline = isolate->AdjustAmountOfExternalAllocatedMemory(0);
+ CHECK_EQ(baseline + kSize,
+ isolate->AdjustAmountOfExternalAllocatedMemory(kSize));
CHECK_EQ(baseline,
- cast(isolate->AdjustAmountOfExternalAllocatedMemory(-kSize)));
+ isolate->AdjustAmountOfExternalAllocatedMemory(-kSize));
}
@@ -17172,7 +17169,7 @@
// Set stack limit.
v8::ResourceConstraints constraints;
constraints.set_stack_limit(set_limit);
- CHECK(v8::SetResourceConstraints(&constraints));
+ CHECK(v8::SetResourceConstraints(CcTest::isolate(), &constraints));
// Execute a script.
LocalContext env;
@@ -17196,7 +17193,7 @@
// Set stack limit.
v8::ResourceConstraints constraints;
constraints.set_stack_limit(set_limit);
- CHECK(v8::SetResourceConstraints(&constraints));
+ CHECK(v8::SetResourceConstraints(CcTest::isolate(), &constraints));
// Execute a script.
v8::HandleScope scope(CcTest::isolate());
@@ -18646,7 +18643,7 @@
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
break;
}
@@ -19937,16 +19934,28 @@
v8::Isolate* isolate = v8::Isolate::New();
isolate->Enter();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- CHECK_EQ(NULL, isolate->GetData());
- CHECK_EQ(NULL, i_isolate->GetData());
- static void* data1 = reinterpret_cast<void*>(0xacce55ed);
- isolate->SetData(data1);
- CHECK_EQ(data1, isolate->GetData());
- CHECK_EQ(data1, i_isolate->GetData());
- static void* data2 = reinterpret_cast<void*>(0xdecea5ed);
- i_isolate->SetData(data2);
- CHECK_EQ(data2, isolate->GetData());
- CHECK_EQ(data2, i_isolate->GetData());
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ CHECK_EQ(NULL, isolate->GetData(slot));
+ CHECK_EQ(NULL, i_isolate->GetData(slot));
+ }
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ void* data = reinterpret_cast<void*>(0xacce55ed + slot);
+ isolate->SetData(slot, data);
+ }
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ void* data = reinterpret_cast<void*>(0xacce55ed + slot);
+ CHECK_EQ(data, isolate->GetData(slot));
+ CHECK_EQ(data, i_isolate->GetData(slot));
+ }
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ void* data = reinterpret_cast<void*>(0xdecea5ed + slot);
+ isolate->SetData(slot, data);
+ }
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ void* data = reinterpret_cast<void*>(0xdecea5ed + slot);
+ CHECK_EQ(data, isolate->GetData(slot));
+ CHECK_EQ(data, i_isolate->GetData(slot));
+ }
isolate->Exit();
isolate->Dispose();
}
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 448c589..29533d4 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -3570,6 +3570,8 @@
v8::internal::Heap* heap = CcTest::heap();
GlobalHandles* global_handles = isolate->global_handles();
+ if (!isolate->use_crankshaft()) return;
+
// The allocation site at the head of the list is ours.
Handle<AllocationSite> site;
{
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 4aa74a8..333e2a6 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -1213,7 +1213,7 @@
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
// String s is made of 2^17 = 131072 'c' characters and a is an array
// starting with 'bad', followed by 2^14 times the string s. That means the
diff --git a/test/mjsunit/mjsunit.js b/test/mjsunit/mjsunit.js
index 1293537..e5fb6c2 100644
--- a/test/mjsunit/mjsunit.js
+++ b/test/mjsunit/mjsunit.js
@@ -54,6 +54,10 @@
// and the properties of non-Array objects).
var assertEquals;
+
+// The difference between expected and found value is within certain tolerance.
+var assertEqualsDelta;
+
// The found object is an Array with the same length and elements
// as the expected object. The expected object doesn't need to be an Array,
// as long as it's "array-ish".
@@ -247,6 +251,12 @@
};
+ assertEqualsDelta =
+ function assertEqualsDelta(expected, found, delta, name_opt) {
+ assertTrue(Math.abs(expected - found) <= delta, name_opt);
+ };
+
+
assertArrayEquals = function assertArrayEquals(expected, found, name_opt) {
var start = "";
if (name_opt) {
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index d975f2b..be3ec52 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -33,7 +33,7 @@
##############################################################################
# Flaky tests.
# BUG(v8:2921).
- 'debug-step-4-in-frame': [PASS, FLAKY],
+ 'debug-step-4-in-frame': [PASS, FAIL],
##############################################################################
# Fails.
diff --git a/test/mjsunit/regexp-multiline-stack-trace.js b/test/mjsunit/regexp-multiline-stack-trace.js
deleted file mode 100644
index fc248ef..0000000
--- a/test/mjsunit/regexp-multiline-stack-trace.js
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The flags below are to test the trace-calls functionality and the
-// preallocated meessage memory.
-// Flags: --trace-calls --preallocate-message-memory
-
-/**
- * @fileoverview Check that various regexp constructs work as intended.
- * Particularly those regexps that use ^ and $.
- */
-
-assertTrue(/^bar/.test("bar"));
-assertTrue(/^bar/.test("bar\nfoo"));
-assertFalse(/^bar/.test("foo\nbar"));
-assertTrue(/^bar/m.test("bar"));
-assertTrue(/^bar/m.test("bar\nfoo"));
-assertTrue(/^bar/m.test("foo\nbar"));
-
-assertTrue(/bar$/.test("bar"));
-assertFalse(/bar$/.test("bar\nfoo"));
-assertTrue(/bar$/.test("foo\nbar"));
-assertTrue(/bar$/m.test("bar"));
-assertTrue(/bar$/m.test("bar\nfoo"));
-assertTrue(/bar$/m.test("foo\nbar"));
-
-assertFalse(/^bxr/.test("bar"));
-assertFalse(/^bxr/.test("bar\nfoo"));
-assertFalse(/^bxr/m.test("bar"));
-assertFalse(/^bxr/m.test("bar\nfoo"));
-assertFalse(/^bxr/m.test("foo\nbar"));
-
-assertFalse(/bxr$/.test("bar"));
-assertFalse(/bxr$/.test("foo\nbar"));
-assertFalse(/bxr$/m.test("bar"));
-assertFalse(/bxr$/m.test("bar\nfoo"));
-assertFalse(/bxr$/m.test("foo\nbar"));
-
-
-assertTrue(/^.*$/.test(""));
-assertTrue(/^.*$/.test("foo"));
-assertFalse(/^.*$/.test("\n"));
-assertTrue(/^.*$/m.test("\n"));
-
-assertTrue(/^[\s]*$/.test(" "));
-assertTrue(/^[\s]*$/.test("\n"));
-
-assertTrue(/^[^]*$/.test(""));
-assertTrue(/^[^]*$/.test("foo"));
-assertTrue(/^[^]*$/.test("\n"));
-
-assertTrue(/^([()\s]|.)*$/.test("()\n()"));
-assertTrue(/^([()\n]|.)*$/.test("()\n()"));
-assertFalse(/^([()]|.)*$/.test("()\n()"));
-assertTrue(/^([()]|.)*$/m.test("()\n()"));
-assertTrue(/^([()]|.)*$/m.test("()\n"));
-assertTrue(/^[()]*$/m.test("()\n."));
-
-assertTrue(/^[\].]*$/.test("...]..."));
-
-
-function check_case(lc, uc) {
- var a = new RegExp("^" + lc + "$");
- assertFalse(a.test(uc));
- a = new RegExp("^" + lc + "$", "i");
- assertTrue(a.test(uc));
-
- var A = new RegExp("^" + uc + "$");
- assertFalse(A.test(lc));
- A = new RegExp("^" + uc + "$", "i");
- assertTrue(A.test(lc));
-
- a = new RegExp("^[" + lc + "]$");
- assertFalse(a.test(uc));
- a = new RegExp("^[" + lc + "]$", "i");
- assertTrue(a.test(uc));
-
- A = new RegExp("^[" + uc + "]$");
- assertFalse(A.test(lc));
- A = new RegExp("^[" + uc + "]$", "i");
- assertTrue(A.test(lc));
-}
-
-
-check_case("a", "A");
-// Aring
-check_case(String.fromCharCode(229), String.fromCharCode(197));
-// Russian G
-check_case(String.fromCharCode(0x413), String.fromCharCode(0x433));
-
-
-assertThrows("a = new RegExp('[z-a]');");
diff --git a/test/mjsunit/sin-cos.js b/test/mjsunit/sin-cos.js
index 1176b6c..b63c15e 100644
--- a/test/mjsunit/sin-cos.js
+++ b/test/mjsunit/sin-cos.js
@@ -27,6 +27,8 @@
// Test Math.sin and Math.cos.
+// Flags: --allow-natives-syntax
+
function sinTest() {
assertEquals(0, Math.sin(0));
assertEquals(1, Math.sin(Math.PI / 2));
@@ -97,7 +99,7 @@
var test_inputs = [];
for (var i = -10000; i < 10000; i += 177) test_inputs.push(i/1257);
-var epsilon = 0.000001;
+var epsilon = 0.0000001;
test_inputs.push(0);
test_inputs.push(0 + epsilon);
@@ -117,8 +119,8 @@
var x = test_inputs[i];
var err_sin = abs_error(Math.sin, sin, x);
var err_cos = abs_error(Math.cos, cos, x)
- assertTrue(err_sin < 1E-13);
- assertTrue(err_cos < 1E-13);
+ assertEqualsDelta(0, err_sin, 1E-13);
+ assertEqualsDelta(0, err_cos, 1E-13);
squares.push(err_sin*err_sin + err_cos*err_cos);
}
@@ -132,7 +134,7 @@
}
var err_rms = Math.sqrt(squares[0] / test_inputs.length / 2);
-assertTrue(err_rms < 1E-14);
+assertEqualsDelta(0, err_rms, 1E-14);
assertEquals(-1, Math.cos({ valueOf: function() { return Math.PI; } }));
assertEquals(0, Math.sin("0x00000"));
@@ -141,3 +143,40 @@
assertTrue(isNaN(Math.cos("-Infinity")));
assertEquals("Infinity", String(Math.tan(Math.PI/2)));
assertEquals("-Infinity", String(Math.tan(-Math.PI/2)));
+assertEquals("-Infinity", String(1/Math.sin("-0")));
+
+// Assert that the remainder after division by pi is reasonably precise.
+function assertError(expected, x, epsilon) {
+ assertTrue(Math.abs(x - expected) < epsilon);
+}
+
+assertEqualsDelta(0.9367521275331447, Math.cos(1e06), 1e-15);
+assertEqualsDelta(0.8731196226768560, Math.cos(1e10), 1e-08);
+assertEqualsDelta(0.9367521275331447, Math.cos(-1e06), 1e-15);
+assertEqualsDelta(0.8731196226768560, Math.cos(-1e10), 1e-08);
+assertEqualsDelta(-0.3499935021712929, Math.sin(1e06), 1e-15);
+assertEqualsDelta(-0.4875060250875106, Math.sin(1e10), 1e-08);
+assertEqualsDelta(0.3499935021712929, Math.sin(-1e06), 1e-15);
+assertEqualsDelta(0.4875060250875106, Math.sin(-1e10), 1e-08);
+assertEqualsDelta(0.7796880066069787, Math.sin(1e16), 1e-05);
+assertEqualsDelta(-0.6261681981330861, Math.cos(1e16), 1e-05);
+
+// Assert that remainder calculation terminates.
+for (var i = -1024; i < 1024; i++) {
+ assertFalse(isNaN(Math.sin(Math.pow(2, i))));
+}
+
+assertFalse(isNaN(Math.cos(1.57079632679489700)));
+assertFalse(isNaN(Math.cos(-1e-100)));
+assertFalse(isNaN(Math.cos(-1e-323)));
+
+
+function no_deopt_on_minus_zero(x) {
+ return Math.sin(x) + Math.cos(x) + Math.tan(x);
+}
+
+no_deopt_on_minus_zero(1);
+no_deopt_on_minus_zero(1);
+%OptimizeFunctionOnNextCall(no_deopt_on_minus_zero);
+no_deopt_on_minus_zero(-0);
+assertOptimized(no_deopt_on_minus_zero);
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 9e23dce..d5e851c 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -599,10 +599,6 @@
# Negative hexadecimal literals are parsed as NaN. This test is outdated.
'ecma/TypeConversion/9.3.1-3': [FAIL_OK],
-
- # Math.tan expectations are more strict than the spec.
- 'ecma/Math/15.8.2.18': [FAIL_OK],
-
##################### FAILING TESTS #####################
# This section is for tests that fail in V8 and pass in JSC.
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index f7e94a7..d5c33c6 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -59,7 +59,6 @@
['component=="shared_library"', {
'type': '<(component)',
'sources': [
- '../../src/defaults.cc',
# Note: on non-Windows we still build this file so that gyp
# has some sources to link into the component.
'../../src/v8dll-main.cc',
@@ -865,10 +864,6 @@
'BUILDING_V8_SHARED',
'V8_SHARED',
],
- }, {
- 'sources': [
- '../../src/defaults.cc',
- ],
}],
['v8_postmortem_support=="true"', {
'sources': [
diff --git a/tools/push-to-trunk/common_includes.py b/tools/push-to-trunk/common_includes.py
index 9547367..b6f9761 100644
--- a/tools/push-to-trunk/common_includes.py
+++ b/tools/push-to-trunk/common_includes.py
@@ -81,17 +81,50 @@
return "".join(result)
-def MakeChangeLogBody(commit_generator):
+def MakeComment(text):
+ return MSub(r"^( ?)", "#", text)
+
+
+def StripComments(text):
+ # Use split not splitlines to keep terminal newlines.
+ return "\n".join(filter(lambda x: not x.startswith("#"), text.split("\n")))
+
+
+def MakeChangeLogBody(commit_messages, auto_format=False):
result = ""
- for (title, body, author) in commit_generator():
+ added_titles = set()
+ for (title, body, author) in commit_messages:
+ # TODO(machenbach): Reload the commit description from rietveld in order to
+ # catch late changes.
+ title = title.rstrip()
+ if auto_format:
+ # Only add commits that set the LOG flag correctly.
+ log_exp = r"^[ \t]*LOG[ \t]*=[ \t]*(?:Y(?:ES)?)|TRUE"
+ if not re.search(log_exp, body, flags=re.I | re.M):
+ continue
+ # Never include reverts.
+ if title.startswith("Revert "):
+ continue
+ # Don't include duplicates.
+ if title in added_titles:
+ continue
+
+ # TODO(machenbach): Let python do all formatting. Get raw git title, attach
+ # issue and add/move dot to the end - all in one line. Make formatting and
+ # indentation afterwards.
+
# Add the commit's title line.
- result += "%s\n" % title.rstrip()
+ result += "%s\n" % title
+ added_titles.add(title)
# Add bug references.
result += MakeChangeLogBugReference(body)
- # Append the commit's author for reference.
- result += "%s\n\n" % author.rstrip()
+ # Append the commit's author for reference if not in auto-format mode.
+ if not auto_format:
+ result += "%s\n" % author.rstrip()
+
+ result += "\n"
return result
@@ -370,15 +403,18 @@
Step.__init__(self, "Upload for code review.")
def RunStep(self):
- if self._options and self._options.r:
+ if self._options.r:
print "Using account %s for review." % self._options.r
reviewer = self._options.r
else:
print "Please enter the email address of a V8 reviewer for your patch: ",
self.DieInForcedMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
- args = "cl upload -r \"%s\" --send-mail" % reviewer
- if self.Git(args,pipe=False) is None:
+ force_flag = " -f" if self._options.f else ""
+ args = "cl upload -r \"%s\" --send-mail%s" % (reviewer, force_flag)
+ # TODO(machenbach): Check output in forced mode. Verify that all required
+ # base files were uploaded, if not retry.
+ if self.Git(args, pipe=False) is None:
self.Die("'git cl upload' failed, please try again.")
diff --git a/tools/push-to-trunk/push_to_trunk.py b/tools/push-to-trunk/push_to_trunk.py
index c8fdc7e..7278323 100755
--- a/tools/push-to-trunk/push_to_trunk.py
+++ b/tools/push-to-trunk/push_to_trunk.py
@@ -112,20 +112,29 @@
args = "log %s..HEAD --format=%%H" % self._state["last_push"]
commits = self.Git(args).strip()
- def GetCommitMessages():
- for commit in commits.splitlines():
- yield [
- self.Git("log -1 %s --format=\"%%w(80,8,8)%%s\"" % commit),
- self.Git("log -1 %s --format=\"%%B\"" % commit),
- self.Git("log -1 %s --format=\"%%w(80,8,8)(%%an)\"" % commit),
- ]
+ # Cache raw commit messages.
+ commit_messages = [
+ [
+ self.Git("log -1 %s --format=\"%%w(80,8,8)%%s\"" % commit),
+ self.Git("log -1 %s --format=\"%%B\"" % commit),
+ self.Git("log -1 %s --format=\"%%w(80,8,8)(%%an)\"" % commit),
+ ] for commit in commits.splitlines()
+ ]
- body = MakeChangeLogBody(GetCommitMessages)
+ # Auto-format commit messages.
+ body = MakeChangeLogBody(commit_messages, auto_format=True)
AppendToFile(body, self.Config(CHANGELOG_ENTRY_FILE))
- msg = " Performance and stability improvements on all platforms.\n"
+ msg = (" Performance and stability improvements on all platforms."
+ "\n#\n# The change log above is auto-generated. Please review if "
+ "all relevant\n# commit messages from the list below are included."
+ "\n# All lines starting with # will be stripped.\n#\n")
AppendToFile(msg, self.Config(CHANGELOG_ENTRY_FILE))
+ # Include unformatted commit messages as a reference in a comment.
+ comment_body = MakeComment(MakeChangeLogBody(commit_messages))
+ AppendToFile(comment_body, self.Config(CHANGELOG_ENTRY_FILE))
+
class EditChangeLog(Step):
def __init__(self):
@@ -143,9 +152,10 @@
handle, new_changelog = tempfile.mkstemp()
os.close(handle)
- # (1) Eliminate tabs, (2) fix too little and (3) too much indentation, and
- # (4) eliminate trailing whitespace.
+ # (1) Strip comments, (2) eliminate tabs, (3) fix too little and (4) too
+ # much indentation, and (5) eliminate trailing whitespace.
changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE)).rstrip()
+ changelog_entry = StripComments(changelog_entry)
changelog_entry = MSub(r"\t", r" ", changelog_entry)
changelog_entry = MSub(r"^ {1,7}([^ ])", r" \1", changelog_entry)
changelog_entry = MSub(r"^ {9,80}([^ ])", r" \1", changelog_entry)
@@ -457,7 +467,8 @@
args = "commit -am \"Update V8 to version %s.\n\nTBR=%s\"" % (ver, rev)
if self.Git(args) is None:
self.Die("'git commit' failed.")
- if self.Git("cl upload --send-mail", pipe=False) is None:
+ force_flag = " -f" if self._options.f else ""
+ if self.Git("cl upload --send-mail%s" % force_flag, pipe=False) is None:
self.Die("'git cl upload' failed, please try again.")
print "CL uploaded."
diff --git a/tools/push-to-trunk/test_scripts.py b/tools/push-to-trunk/test_scripts.py
index 6ce22de..42b26cf 100644
--- a/tools/push-to-trunk/test_scripts.py
+++ b/tools/push-to-trunk/test_scripts.py
@@ -53,8 +53,20 @@
class ToplevelTest(unittest.TestCase):
+ def testMakeComment(self):
+ self.assertEquals("# Line 1\n# Line 2\n#",
+ MakeComment(" Line 1\n Line 2\n"))
+ self.assertEquals("#Line 1\n#Line 2",
+ MakeComment("Line 1\n Line 2"))
+
+ def testStripComments(self):
+ self.assertEquals(" Line 1\n Line 3\n",
+ StripComments(" Line 1\n# Line 2\n Line 3\n#\n"))
+ self.assertEquals("\nLine 2 ### Test\n #",
+ StripComments("###\n# \n\n# Line 1\nLine 2 ### Test\n #"))
+
def testMakeChangeLogBodySimple(self):
- commits = lambda: [
+ commits = [
[" Title text 1",
"Title text 1\n\nBUG=\n",
" author1@chromium.org"],
@@ -70,8 +82,27 @@
MakeChangeLogBody(commits))
def testMakeChangeLogBodyEmpty(self):
- commits = lambda: []
- self.assertEquals("", MakeChangeLogBody(commits))
+ self.assertEquals("", MakeChangeLogBody([]))
+
+ def testMakeChangeLogBodyAutoFormat(self):
+ commits = [
+ [" Title text 1",
+ "Title text 1\nLOG=y\nBUG=\n",
+ " author1@chromium.org"],
+ [" Title text 2",
+ "Title text 2\n\nBUG=1234\n",
+ " author2@chromium.org"],
+ [" Title text 3",
+ "Title text 3\n\nBUG=1234\nLOG = Yes\n",
+ " author3@chromium.org"],
+ [" Title text 3",
+ "Title text 4\n\nBUG=1234\nLOG=\n",
+ " author4@chromium.org"],
+ ]
+ self.assertEquals(" Title text 1\n\n"
+ " Title text 3\n"
+ " (Chromium issue 1234)\n\n",
+ MakeChangeLogBody(commits, True))
def testMakeChangeLogBugReferenceEmpty(self):
self.assertEquals("", MakeChangeLogBugReference(""))
@@ -327,28 +358,55 @@
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
self._git_recipe = [
- ["log 1234..HEAD --format=%H", "rev1\nrev2"],
+ ["log 1234..HEAD --format=%H", "rev1\nrev2\nrev3"],
["log -1 rev1 --format=\"%w(80,8,8)%s\"", " Title text 1"],
- ["log -1 rev1 --format=\"%B\"", "Title\n\nBUG=\n"],
+ ["log -1 rev1 --format=\"%B\"", "Title\n\nBUG=\nLOG=y\n"],
["log -1 rev1 --format=\"%w(80,8,8)(%an)\"",
" author1@chromium.org"],
["log -1 rev2 --format=\"%w(80,8,8)%s\"", " Title text 2"],
- ["log -1 rev2 --format=\"%B\"", "Title\n\nBUG=321\n"],
+ ["log -1 rev2 --format=\"%B\"", "Title\n\nBUG=123\nLOG= \n"],
["log -1 rev2 --format=\"%w(80,8,8)(%an)\"",
" author2@chromium.org"],
+ ["log -1 rev3 --format=\"%w(80,8,8)%s\"", " Title text 3"],
+ ["log -1 rev3 --format=\"%B\"", "Title\n\nBUG=321\nLOG=true\n"],
+ ["log -1 rev3 --format=\"%w(80,8,8)(%an)\"",
+ " author3@chromium.org"],
]
self.MakeStep().Persist("last_push", "1234")
self.MakeStep(PrepareChangeLog).Run()
- cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
- self.assertTrue(re.search(r"\d+\-\d+\-\d+: Version 3\.22\.5", cl))
- self.assertTrue(re.search(r" Title text 1", cl))
- self.assertTrue(re.search(r" Title text 2", cl))
- self.assertTrue(re.search(r" author1@chromium.org", cl))
- self.assertTrue(re.search(r" author2@chromium.org", cl))
- self.assertTrue(re.search(r" \(Chromium issue 321\)", cl))
- self.assertFalse(re.search(r"BUG=", cl))
+ actual_cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+
+ # TODO(machenbach): Mock out call to date() in order to make a fixed
+ # comparison here instead of a regexp match.
+ expected_cl = """\\d+\\-\\d+\\-\\d+: Version 3\\.22\\.5
+
+ Title text 1
+
+ Title text 3
+ \\(Chromium issue 321\\)
+
+ Performance and stability improvements on all platforms\\.
+#
+# The change log above is auto-generated\\. Please review if all relevant
+# commit messages from the list below are included\\.
+# All lines starting with # will be stripped\\.
+#
+# Title text 1
+# author1@chromium\\.org
+#
+# Title text 2
+# \\(Chromium issue 123\\)
+# author2@chromium\\.org
+#
+# Title text 3
+# \\(Chromium issue 321\\)
+# author3@chromium\\.org
+#
+#"""
+
+ self.assertTrue(re.match(expected_cl, actual_cl))
self.assertEquals("3", self.MakeStep().Restore("major"))
self.assertEquals("22", self.MakeStep().Restore("minor"))
self.assertEquals("5", self.MakeStep().Restore("build"))
@@ -447,9 +505,17 @@
self.assertTrue(re.search(r"Version 3.22.5", cl))
self.assertTrue(re.search(r" Log text 1", cl))
self.assertTrue(re.search(r" \(issue 321\)", cl))
+ self.assertFalse(re.search(r" author1@chromium\.org", cl))
+
+ # Make sure all comments got stripped.
+ self.assertFalse(re.search(r"^#", cl, flags=re.M))
+
version = FileToText(TEST_CONFIG[VERSION_FILE])
self.assertTrue(re.search(r"#define BUILD_NUMBER\s+6", version))
+ def CheckUpload():
+ cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+
def CheckSVNCommit():
commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
self.assertTrue(re.search(r"Version 3.22.5", commit))
@@ -461,6 +527,7 @@
self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+ force_flag = " -f" if force else ""
self._git_recipe = [
["status -s -uno", ""],
["status -s -b -uno", "## some_branch\n"],
@@ -474,14 +541,15 @@
["log -1 1234", "Last push ouput\n"],
["log 1234..HEAD --format=%H", "rev1\n"],
["log -1 rev1 --format=\"%w(80,8,8)%s\"", " Log text 1.\n"],
- ["log -1 rev1 --format=\"%B\"", "Text\nBUG=v8:321\nText\n"],
+ ["log -1 rev1 --format=\"%B\"", "Text\nLOG=YES\nBUG=v8:321\nText\n"],
["log -1 rev1 --format=\"%w(80,8,8)(%an)\"",
" author1@chromium.org\n"],
[("commit -a -m \"Prepare push to trunk. "
"Now working on version 3.22.6.\""),
" 2 files changed\n",
CheckPreparePush],
- ["cl upload -r \"reviewer@chromium.org\" --send-mail", "done\n"],
+ ["cl upload -r \"reviewer@chromium.org\" --send-mail%s" % force_flag,
+ "done\n"],
["cl dcommit -f", "Closing issue\n"],
["svn fetch", "fetch result\n"],
["checkout svn/bleeding_edge", ""],
@@ -502,7 +570,7 @@
[("commit -am \"Update V8 to version 3.22.5.\n\n"
"TBR=reviewer@chromium.org\""),
""],
- ["cl upload --send-mail", ""],
+ ["cl upload --send-mail%s" % force_flag, ""],
["checkout -f some_branch", ""],
["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""],
["branch -D %s" % TEST_CONFIG[BRANCHNAME], ""],