summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTrevor Norris <trev.norris@gmail.com>2013-05-22 12:56:12 -0700
committerTrevor Norris <trev.norris@gmail.com>2013-05-22 13:13:11 -0700
commit506fc4de1e820d97b637f6e01dda2ab97667efa7 (patch)
treee2f2fec2085f6c1a603b79be4e63e765292cca52
parent52adc0d96309f9e04cbb220d63206e32b8309081 (diff)
downloadnode-new-506fc4de1e820d97b637f6e01dda2ab97667efa7.tar.gz
v8: upgrade to v3.19.3
-rw-r--r--deps/v8/ChangeLog27
-rw-r--r--deps/v8/build/common.gypi10
-rw-r--r--deps/v8/include/v8.h458
-rw-r--r--deps/v8/src/api.cc435
-rw-r--r--deps/v8/src/api.h2
-rw-r--r--deps/v8/src/apiutils.h25
-rw-r--r--deps/v8/src/arguments.cc195
-rw-r--r--deps/v8/src/arguments.h261
-rw-r--r--deps/v8/src/arm/assembler-arm.cc20
-rw-r--r--deps/v8/src/arm/assembler-arm.h20
-rw-r--r--deps/v8/src/arm/builtins-arm.cc49
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc203
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h11
-rw-r--r--deps/v8/src/arm/codegen-arm.cc10
-rw-r--r--deps/v8/src/arm/debug-arm.cc4
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc200
-rw-r--r--deps/v8/src/arm/disasm-arm.cc8
-rw-r--r--deps/v8/src/arm/frames-arm.cc4
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc129
-rw-r--r--deps/v8/src/arm/ic-arm.cc18
-rw-r--r--deps/v8/src/arm/lithium-arm.cc38
-rw-r--r--deps/v8/src/arm/lithium-arm.h42
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc228
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h18
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc123
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h71
-rw-r--r--deps/v8/src/arm/simulator-arm.cc61
-rw-r--r--deps/v8/src/arm/stub-cache-arm.cc112
-rw-r--r--deps/v8/src/array.js23
-rw-r--r--deps/v8/src/assembler.h10
-rw-r--r--deps/v8/src/ast.h19
-rw-r--r--deps/v8/src/bootstrapper.cc150
-rw-r--r--deps/v8/src/bootstrapper.h2
-rw-r--r--deps/v8/src/builtins.cc39
-rw-r--r--deps/v8/src/builtins.h3
-rw-r--r--deps/v8/src/checks.cc2
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc5
-rw-r--r--deps/v8/src/code-stubs.cc104
-rw-r--r--deps/v8/src/code-stubs.h154
-rw-r--r--deps/v8/src/compiler.cc31
-rw-r--r--deps/v8/src/compiler.h22
-rw-r--r--deps/v8/src/contexts.h11
-rw-r--r--deps/v8/src/cpu-profiler.cc16
-rw-r--r--deps/v8/src/cpu-profiler.h10
-rw-r--r--deps/v8/src/d8.cc679
-rw-r--r--deps/v8/src/deoptimizer.cc281
-rw-r--r--deps/v8/src/deoptimizer.h92
-rw-r--r--deps/v8/src/disassembler.cc9
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc4
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc2
-rw-r--r--deps/v8/src/factory.h16
-rw-r--r--deps/v8/src/flag-definitions.h10
-rw-r--r--deps/v8/src/frames.h4
-rw-r--r--deps/v8/src/handles.cc17
-rw-r--r--deps/v8/src/heap.cc130
-rw-r--r--deps/v8/src/heap.h45
-rw-r--r--deps/v8/src/hydrogen-instructions.cc104
-rw-r--r--deps/v8/src/hydrogen-instructions.h180
-rw-r--r--deps/v8/src/hydrogen.cc415
-rw-r--r--deps/v8/src/hydrogen.h18
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc5
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc134
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h12
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc215
-rw-r--r--deps/v8/src/ia32/frames-ia32.cc4
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc94
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc222
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h18
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc43
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h56
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc49
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h9
-rw-r--r--deps/v8/src/ia32/stub-cache-ia32.cc70
-rw-r--r--deps/v8/src/ic.cc51
-rw-r--r--deps/v8/src/ic.h2
-rw-r--r--deps/v8/src/isolate.cc47
-rw-r--r--deps/v8/src/isolate.h18
-rw-r--r--deps/v8/src/log.cc10
-rw-r--r--deps/v8/src/log.h3
-rw-r--r--deps/v8/src/mark-compact.cc110
-rw-r--r--deps/v8/src/mark-compact.h36
-rw-r--r--deps/v8/src/messages.js3
-rw-r--r--deps/v8/src/mips/builtins-mips.cc5
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc92
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h11
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc197
-rw-r--r--deps/v8/src/mips/frames-mips.cc4
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc107
-rw-r--r--deps/v8/src/mips/ic-mips.cc2
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc198
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h22
-rw-r--r--deps/v8/src/mips/lithium-mips.cc38
-rw-r--r--deps/v8/src/mips/lithium-mips.h42
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc33
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h5
-rw-r--r--deps/v8/src/mips/simulator-mips.cc49
-rw-r--r--deps/v8/src/mips/stub-cache-mips.cc61
-rw-r--r--deps/v8/src/object-observe.js242
-rw-r--r--deps/v8/src/objects-debug.cc11
-rw-r--r--deps/v8/src/objects-inl.h76
-rw-r--r--deps/v8/src/objects-printer.cc4
-rw-r--r--deps/v8/src/objects-visiting-inl.h38
-rw-r--r--deps/v8/src/objects.cc446
-rw-r--r--deps/v8/src/objects.h61
-rw-r--r--deps/v8/src/parser.cc27
-rw-r--r--deps/v8/src/parser.h2
-rw-r--r--deps/v8/src/platform-posix.cc17
-rw-r--r--deps/v8/src/profile-generator-inl.h3
-rw-r--r--deps/v8/src/profile-generator.cc35
-rw-r--r--deps/v8/src/profile-generator.h11
-rw-r--r--deps/v8/src/property-details.h12
-rw-r--r--deps/v8/src/runtime-profiler.cc5
-rw-r--r--deps/v8/src/runtime.cc60
-rw-r--r--deps/v8/src/runtime.h1
-rw-r--r--deps/v8/src/string-stream.h25
-rw-r--r--deps/v8/src/string.js1
-rw-r--r--deps/v8/src/stub-cache.cc56
-rw-r--r--deps/v8/src/stub-cache.h3
-rw-r--r--deps/v8/src/type-info.cc43
-rw-r--r--deps/v8/src/utils.h41
-rw-r--r--deps/v8/src/v8.cc1
-rw-r--r--deps/v8/src/v8natives.js9
-rw-r--r--deps/v8/src/version.cc2
-rw-r--r--deps/v8/src/x64/builtins-x64.cc5
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc104
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h11
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc192
-rw-r--r--deps/v8/src/x64/frames-x64.cc4
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc95
-rw-r--r--deps/v8/src/x64/ic-x64.cc2
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc212
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h19
-rw-r--r--deps/v8/src/x64/lithium-x64.cc38
-rw-r--r--deps/v8/src/x64/lithium-x64.h44
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc41
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h7
-rw-r--r--deps/v8/src/x64/stub-cache-x64.cc63
-rw-r--r--deps/v8/test/cctest/cctest.cc2
-rw-r--r--deps/v8/test/cctest/cctest.gyp1
-rw-r--r--deps/v8/test/cctest/cctest.status13
-rw-r--r--deps/v8/test/cctest/test-api.cc447
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc9
-rw-r--r--deps/v8/test/cctest/test-compare-nil-ic-stub.cc86
-rw-r--r--deps/v8/test/cctest/test-conversions.cc19
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc74
-rw-r--r--deps/v8/test/cctest/test-debug.cc10
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc4
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc2
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc20
-rw-r--r--deps/v8/test/cctest/test-heap.cc7
-rw-r--r--deps/v8/test/cctest/test-lockers.cc2
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc11
-rw-r--r--deps/v8/test/cctest/test-parsing.cc3
-rw-r--r--deps/v8/test/mjsunit/allocation-site-info.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/alloc-object.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-code.js79
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-code2.js84
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-code3.js78
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-code4.js78
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-code5.js89
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-code6.js73
-rw-r--r--deps/v8/test/mjsunit/constant-folding-2.js9
-rw-r--r--deps/v8/test/mjsunit/debug-script.js2
-rw-r--r--deps/v8/test/mjsunit/elements-transition-hoisting.js2
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-1.js52
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-2.js41
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-3.js39
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-4.js39
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-5.js40
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-6.js39
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-7.js40
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-8.js40
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-9.js49
-rw-r--r--deps/v8/test/mjsunit/external-array-no-sse2.js9
-rw-r--r--deps/v8/test/mjsunit/external-array.js9
-rw-r--r--deps/v8/test/mjsunit/fast-element-smi-check.js6
-rw-r--r--deps/v8/test/mjsunit/function-prototype.js23
-rw-r--r--deps/v8/test/mjsunit/generated-transition-stub.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-iteration.js387
-rw-r--r--deps/v8/test/mjsunit/harmony/object-observe.js357
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-241344.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2681.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2686.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-233737.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-242502.js66
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-242870.js43
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js60
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js35
-rw-r--r--deps/v8/test/mjsunit/track-fields.js64
-rw-r--r--deps/v8/test/mjsunit/unbox-double-arrays.js3
-rw-r--r--deps/v8/test/test262/README4
-rw-r--r--deps/v8/test/test262/testcfg.py4
-rw-r--r--deps/v8/tools/gyp/v8.gyp1737
194 files changed, 8638 insertions, 5465 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index d1dbb29ed0..39885e7831 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,30 @@
+2013-05-22: Version 3.19.3
+
+ Performance and stability improvements on all platforms.
+
+
+2013-05-17: Version 3.19.2
+
+ Fill in one-word-fillers for the unused property fields
+ (Chromium issue 240056).
+
+ Removed use_system_v8 logic from the mainline gyp file
+ (Chromium issue 226860).
+
+ Skip CPU profiler samples where top function's stack frame is not
+ set up properly (issue 2628).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-05-14: Version 3.19.1
+
+ Fixed missing hole check for loads from Smi arrays when all uses are
+ changes (Chromium issue 233737)
+
+ Performance and stability improvements on all platforms.
+
+
2013-05-10: Version 3.19.0
Deprecated Context::New which returns Persistent.
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 8028b3eecb..2e3c7854d5 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -29,7 +29,6 @@
{
'variables': {
- 'use_system_v8%': 0,
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command.
@@ -454,6 +453,15 @@
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
+ 'cflags!': [
+ '-O2',
+ '-Os',
+ ],
+ 'cflags': [
+ '-fdata-sections',
+ '-ffunction-sections',
+ '-O3',
+ ],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 3a86e86e02..b3dff3fee1 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -144,6 +144,17 @@ class Value;
template <class T> class Handle;
template <class T> class Local;
template <class T> class Persistent;
+class FunctionTemplate;
+class ObjectTemplate;
+class Data;
+class AccessorInfo;
+template<typename T> class PropertyCallbackInfo;
+class StackTrace;
+class StackFrame;
+class Isolate;
+class DeclaredAccessorDescriptor;
+class ObjectOperationDescriptor;
+class RawOperationDescriptor;
namespace internal {
class Arguments;
@@ -151,6 +162,10 @@ class Heap;
class HeapObject;
class Isolate;
class Object;
+template<typename T>
+class CustomArguments;
+class PropertyCallbackArguments;
+class FunctionCallbackArguments;
}
@@ -695,6 +710,16 @@ template <class T> class Persistent // NOLINT
*/
V8_INLINE(void Reset(Isolate* isolate, const Handle<T>& other));
+ /**
+ * Returns the underlying raw pointer and clears the handle. The caller is
+ * responsible of eventually destroying the underlying object (by creating a
+ * Persistent handle which points to it and Disposing it). In the future,
+ * destructing a Persistent will also Dispose it. With this function, the
+ * embedder can let the Persistent go out of scope without it getting
+ * disposed.
+ */
+ V8_INLINE(T* ClearAndLeak());
+
#ifndef V8_USE_UNSAFE_HANDLES
#ifndef V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
@@ -1779,6 +1804,7 @@ class V8EXPORT String : public Primitive {
*/
class V8EXPORT AsciiValue {
public:
+ // TODO(dcarney): deprecate
explicit AsciiValue(Handle<v8::Value> obj);
~AsciiValue();
char* operator*() { return str_; }
@@ -1853,6 +1879,7 @@ class V8EXPORT Number : public Primitive {
public:
double Value() const;
static Local<Number> New(double value);
+ static Local<Number> New(Isolate* isolate, double value);
V8_INLINE(static Number* Cast(v8::Value* obj));
private:
Number();
@@ -1925,11 +1952,18 @@ enum ExternalArrayType {
*/
typedef Handle<Value> (*AccessorGetter)(Local<String> property,
const AccessorInfo& info);
+typedef void (*AccessorGetterCallback)(
+ Local<String> property,
+ const PropertyCallbackInfo<Value>& info);
typedef void (*AccessorSetter)(Local<String> property,
Local<Value> value,
const AccessorInfo& info);
+typedef void (*AccessorSetterCallback)(
+ Local<String> property,
+ Local<Value> value,
+ const PropertyCallbackInfo<void>& info);
/**
@@ -1999,12 +2033,19 @@ class V8EXPORT Object : public Value {
bool Delete(uint32_t index);
+ // TODO(dcarney): deprecate
bool SetAccessor(Handle<String> name,
AccessorGetter getter,
AccessorSetter setter = 0,
Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None);
+ bool SetAccessor(Handle<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None);
// This function is not yet stable and should not be used at this time.
bool SetAccessor(Handle<String> name,
@@ -2693,13 +2734,36 @@ class V8EXPORT Template : public Data {
};
+template<typename T>
+class V8EXPORT ReturnValue {
+ public:
+ V8_INLINE(explicit ReturnValue(internal::Object** slot));
+ // Handle setters
+ V8_INLINE(void Set(const Persistent<T>& handle));
+ V8_INLINE(void Set(const Handle<T> handle));
+ // Fast primitive setters
+ V8_INLINE(void Set(Isolate* isolate, bool value));
+ V8_INLINE(void Set(Isolate* isolate, double i));
+ V8_INLINE(void Set(Isolate* isolate, int32_t i));
+ V8_INLINE(void Set(Isolate* isolate, uint32_t i));
+ // Fast JS primitive setters
+ V8_INLINE(void SetNull(Isolate* isolate));
+ V8_INLINE(void SetUndefined(Isolate* isolate));
+ private:
+ V8_INLINE(void SetTrue(Isolate* isolate));
+ V8_INLINE(void SetFalse(Isolate* isolate));
+ internal::Object** value_;
+};
+
+
/**
* The argument information given to function call callbacks. This
* class provides access to information about the context of the call,
* including the receiver, the number and values of arguments, and
* the holder of the function.
*/
-class V8EXPORT Arguments {
+template<typename T>
+class V8EXPORT FunctionCallbackInfo {
public:
V8_INLINE(int Length() const);
V8_INLINE(Local<Value> operator[](int i) const);
@@ -2709,15 +2773,20 @@ class V8EXPORT Arguments {
V8_INLINE(bool IsConstructCall() const);
V8_INLINE(Local<Value> Data() const);
V8_INLINE(Isolate* GetIsolate() const);
+ V8_INLINE(ReturnValue<T> GetReturnValue() const);
+ // This shouldn't be public, but the arm compiler needs it.
+ static const int kArgsLength = 5;
- private:
- static const int kIsolateIndex = 0;
- static const int kDataIndex = -1;
- static const int kCalleeIndex = -2;
- static const int kHolderIndex = -3;
-
- friend class ImplementationUtilities;
- V8_INLINE(Arguments(internal::Object** implicit_args,
+ protected:
+ friend class internal::FunctionCallbackArguments;
+ friend class internal::CustomArguments<FunctionCallbackInfo>;
+ static const int kReturnValueIndex = 0;
+ static const int kIsolateIndex = -1;
+ static const int kDataIndex = -2;
+ static const int kCalleeIndex = -3;
+ static const int kHolderIndex = -4;
+
+ V8_INLINE(FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values,
int length,
bool is_construct_call));
@@ -2728,25 +2797,56 @@ class V8EXPORT Arguments {
};
+class V8EXPORT Arguments : public FunctionCallbackInfo<Value> {
+ private:
+ friend class internal::FunctionCallbackArguments;
+ V8_INLINE(Arguments(internal::Object** implicit_args,
+ internal::Object** values,
+ int length,
+ bool is_construct_call));
+};
+
/**
- * The information passed to an accessor callback about the context
+ * The information passed to a property callback about the context
* of the property access.
*/
-class V8EXPORT AccessorInfo {
+template<typename T>
+class V8EXPORT PropertyCallbackInfo {
public:
- V8_INLINE(AccessorInfo(internal::Object** args))
- : args_(args) { }
V8_INLINE(Isolate* GetIsolate() const);
V8_INLINE(Local<Value> Data() const);
V8_INLINE(Local<Object> This() const);
V8_INLINE(Local<Object> Holder() const);
+ V8_INLINE(ReturnValue<T> GetReturnValue() const);
+ // This shouldn't be public, but the arm compiler needs it.
+ static const int kArgsLength = 5;
- private:
+ protected:
+ friend class MacroAssembler;
+ friend class internal::PropertyCallbackArguments;
+ friend class internal::CustomArguments<PropertyCallbackInfo>;
+ static const int kThisIndex = 0;
+ static const int kHolderIndex = -1;
+ static const int kDataIndex = -2;
+ static const int kIsolateIndex = -3;
+ static const int kReturnValueIndex = -4;
+
+ V8_INLINE(PropertyCallbackInfo(internal::Object** args))
+ : args_(args) { }
internal::Object** args_;
};
+class V8EXPORT AccessorInfo : public PropertyCallbackInfo<Value> {
+ private:
+ friend class internal::PropertyCallbackArguments;
+ V8_INLINE(AccessorInfo(internal::Object** args))
+ : PropertyCallbackInfo<Value>(args) { }
+};
+
+
typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
+typedef void (*FunctionCallback)(const FunctionCallbackInfo<Value>& info);
/**
* NamedProperty[Getter|Setter] are used as interceptors on object.
@@ -2754,6 +2854,9 @@ typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
*/
typedef Handle<Value> (*NamedPropertyGetter)(Local<String> property,
const AccessorInfo& info);
+typedef void (*NamedPropertyGetterCallback)(
+ Local<String> property,
+ const PropertyCallbackInfo<Value>& info);
/**
@@ -2763,6 +2866,11 @@ typedef Handle<Value> (*NamedPropertyGetter)(Local<String> property,
typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
Local<Value> value,
const AccessorInfo& info);
+typedef void (*NamedPropertySetterCallback)(
+ Local<String> property,
+ Local<Value> value,
+ const PropertyCallbackInfo<Value>& info);
+
/**
* Returns a non-empty handle if the interceptor intercepts the request.
@@ -2771,6 +2879,9 @@ typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
*/
typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
const AccessorInfo& info);
+typedef void (*NamedPropertyQueryCallback)(
+ Local<String> property,
+ const PropertyCallbackInfo<Integer>& info);
/**
@@ -2780,12 +2891,18 @@ typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
*/
typedef Handle<Boolean> (*NamedPropertyDeleter)(Local<String> property,
const AccessorInfo& info);
+typedef void (*NamedPropertyDeleterCallback)(
+ Local<String> property,
+ const PropertyCallbackInfo<Boolean>& info);
+
/**
* Returns an array containing the names of the properties the named
* property getter intercepts.
*/
typedef Handle<Array> (*NamedPropertyEnumerator)(const AccessorInfo& info);
+typedef void (*NamedPropertyEnumeratorCallback)(
+ const PropertyCallbackInfo<Array>& info);
/**
@@ -2794,6 +2911,9 @@ typedef Handle<Array> (*NamedPropertyEnumerator)(const AccessorInfo& info);
*/
typedef Handle<Value> (*IndexedPropertyGetter)(uint32_t index,
const AccessorInfo& info);
+typedef void (*IndexedPropertyGetterCallback)(
+ uint32_t index,
+ const PropertyCallbackInfo<Value>& info);
/**
@@ -2803,6 +2923,10 @@ typedef Handle<Value> (*IndexedPropertyGetter)(uint32_t index,
typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,
Local<Value> value,
const AccessorInfo& info);
+typedef void (*IndexedPropertySetterCallback)(
+ uint32_t index,
+ Local<Value> value,
+ const PropertyCallbackInfo<Value>& info);
/**
@@ -2811,6 +2935,10 @@ typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,
*/
typedef Handle<Integer> (*IndexedPropertyQuery)(uint32_t index,
const AccessorInfo& info);
+typedef void (*IndexedPropertyQueryCallback)(
+ uint32_t index,
+ const PropertyCallbackInfo<Integer>& info);
+
/**
* Returns a non-empty handle if the deleter intercepts the request.
@@ -2819,12 +2947,18 @@ typedef Handle<Integer> (*IndexedPropertyQuery)(uint32_t index,
*/
typedef Handle<Boolean> (*IndexedPropertyDeleter)(uint32_t index,
const AccessorInfo& info);
+typedef void (*IndexedPropertyDeleterCallback)(
+ uint32_t index,
+ const PropertyCallbackInfo<Boolean>& info);
+
/**
* Returns an array containing the indices of the properties the
* indexed property getter intercepts.
*/
typedef Handle<Array> (*IndexedPropertyEnumerator)(const AccessorInfo& info);
+typedef void (*IndexedPropertyEnumeratorCallback)(
+ const PropertyCallbackInfo<Array>& info);
/**
@@ -2954,11 +3088,18 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> host,
class V8EXPORT FunctionTemplate : public Template {
public:
/** Creates a function template.*/
+ // TODO(dcarney): deprecate
static Local<FunctionTemplate> New(
InvocationCallback callback = 0,
Handle<Value> data = Handle<Value>(),
Handle<Signature> signature = Handle<Signature>(),
int length = 0);
+ static Local<FunctionTemplate> New(
+ FunctionCallback callback, // TODO(dcarney): add back default param.
+ Handle<Value> data = Handle<Value>(),
+ Handle<Signature> signature = Handle<Signature>(),
+ int length = 0);
+
/** Returns the unique function instance in the current execution context.*/
Local<Function> GetFunction();
@@ -2967,8 +3108,11 @@ class V8EXPORT FunctionTemplate : public Template {
* callback is called whenever the function created from this
* FunctionTemplate is called.
*/
+ // TODO(dcarney): deprecate
void SetCallHandler(InvocationCallback callback,
Handle<Value> data = Handle<Value>());
+ void SetCallHandler(FunctionCallback callback,
+ Handle<Value> data = Handle<Value>());
/** Set the predefined length property for the FunctionTemplate. */
void SetLength(int length);
@@ -3020,21 +3164,6 @@ class V8EXPORT FunctionTemplate : public Template {
private:
FunctionTemplate();
- void SetNamedInstancePropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data);
- void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data);
- void SetInstanceCallAsFunctionHandler(InvocationCallback callback,
- Handle<Value> data);
-
friend class Context;
friend class ObjectTemplate;
};
@@ -3083,6 +3212,7 @@ class V8EXPORT ObjectTemplate : public Template {
* defined by FunctionTemplate::HasInstance()), an implicit TypeError is
* thrown and no callback is invoked.
*/
+ // TODO(dcarney): deprecate
void SetAccessor(Handle<String> name,
AccessorGetter getter,
AccessorSetter setter = 0,
@@ -3091,6 +3221,14 @@ class V8EXPORT ObjectTemplate : public Template {
PropertyAttribute attribute = None,
Handle<AccessorSignature> signature =
Handle<AccessorSignature>());
+ void SetAccessor(Handle<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None,
+ Handle<AccessorSignature> signature =
+ Handle<AccessorSignature>());
// This function is not yet stable and should not be used at this time.
bool SetAccessor(Handle<String> name,
@@ -3117,12 +3255,20 @@ class V8EXPORT ObjectTemplate : public Template {
* \param data A piece of data that will be passed to the callbacks
* whenever they are invoked.
*/
+ // TODO(dcarney): deprecate
void SetNamedPropertyHandler(NamedPropertyGetter getter,
NamedPropertySetter setter = 0,
NamedPropertyQuery query = 0,
NamedPropertyDeleter deleter = 0,
NamedPropertyEnumerator enumerator = 0,
Handle<Value> data = Handle<Value>());
+ void SetNamedPropertyHandler(
+ NamedPropertyGetterCallback getter,
+ NamedPropertySetterCallback setter = 0,
+ NamedPropertyQueryCallback query = 0,
+ NamedPropertyDeleterCallback deleter = 0,
+ NamedPropertyEnumeratorCallback enumerator = 0,
+ Handle<Value> data = Handle<Value>());
/**
* Sets an indexed property handler on the object template.
@@ -3140,12 +3286,20 @@ class V8EXPORT ObjectTemplate : public Template {
* \param data A piece of data that will be passed to the callbacks
* whenever they are invoked.
*/
+ // TODO(dcarney): deprecate
void SetIndexedPropertyHandler(IndexedPropertyGetter getter,
IndexedPropertySetter setter = 0,
IndexedPropertyQuery query = 0,
IndexedPropertyDeleter deleter = 0,
IndexedPropertyEnumerator enumerator = 0,
Handle<Value> data = Handle<Value>());
+ void SetIndexedPropertyHandler(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter = 0,
+ IndexedPropertyQueryCallback query = 0,
+ IndexedPropertyDeleterCallback deleter = 0,
+ IndexedPropertyEnumeratorCallback enumerator = 0,
+ Handle<Value> data = Handle<Value>());
/**
* Sets the callback to be used when calling instances created from
@@ -3153,8 +3307,11 @@ class V8EXPORT ObjectTemplate : public Template {
* behave like normal JavaScript objects that cannot be called as a
* function.
*/
+ // TODO(dcarney): deprecate
void SetCallAsFunctionHandler(InvocationCallback callback,
Handle<Value> data = Handle<Value>());
+ void SetCallAsFunctionHandler(FunctionCallback callback,
+ Handle<Value> data = Handle<Value>());
/**
* Mark object instances of the template as undetectable.
@@ -3830,25 +3987,29 @@ struct JitCodeEvent {
// CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
void* user_data;
+ struct name_t {
+ // Name of the object associated with the code, note that the string is not
+ // zero-terminated.
+ const char* str;
+ // Number of chars in str.
+ size_t len;
+ };
+
+ struct line_info_t {
+ // PC offset
+ size_t offset;
+ // Code postion
+ size_t pos;
+ // The position type.
+ PositionType position_type;
+ };
+
union {
// Only valid for CODE_ADDED.
- struct {
- // Name of the object associated with the code, note that the string is
- // not zero-terminated.
- const char* str;
- // Number of chars in str.
- size_t len;
- } name;
+ struct name_t name;
// Only valid for CODE_ADD_LINE_POS_INFO
- struct {
- // PC offset
- size_t offset;
- // Code postion
- size_t pos;
- // The position type.
- PositionType position_type;
- } line_info;
+ struct line_info_t line_info;
// New location of instructions. Only valid for CODE_MOVED.
void* new_code_start;
@@ -4609,11 +4770,10 @@ class V8EXPORT Context {
Handle<Value> global_object = Handle<Value>());
/** Deprecated. Use Isolate version instead. */
- // TODO(mstarzinger): Put this behind the V8_DEPRECATED guard.
- static Persistent<Context> New(
+ V8_DEPRECATED(static Persistent<Context> New(
ExtensionConfiguration* extensions = NULL,
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
- Handle<Value> global_object = Handle<Value>());
+ Handle<Value> global_object = Handle<Value>()));
/** Returns the last entered context. */
static Local<Context> GetEntered();
@@ -4982,6 +5142,14 @@ const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
template <size_t ptr_size> struct SmiTagging;
+template<int kSmiShiftSize>
+V8_INLINE(internal::Object* IntToSmi(int value)) {
+ int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
+ intptr_t tagged_value =
+ (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
+ return reinterpret_cast<internal::Object*>(tagged_value);
+}
+
// Smi constants for 32-bit systems.
template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0;
@@ -4991,6 +5159,23 @@ template <> struct SmiTagging<4> {
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
}
+ V8_INLINE(static internal::Object* IntToSmi(int value)) {
+ return internal::IntToSmi<kSmiShiftSize>(value);
+ }
+ V8_INLINE(static bool IsValidSmi(intptr_t value)) {
+ // To be representable as an tagged small integer, the two
+ // most-significant bits of 'value' must be either 00 or 11 due to
+ // sign-extension. To check this we add 01 to the two
+ // most-significant bits, and check if the most-significant bit is 0
+ //
+ // CAUTION: The original code below:
+ // bool result = ((value + 0x40000000) & 0x80000000) == 0;
+ // may lead to incorrect results according to the C language spec, and
+ // in fact doesn't work correctly with gcc4.1.1 in some cases: The
+ // compiler may produce undefined results in case of signed integer
+ // overflow. The computation must be done w/ unsigned ints.
+ return static_cast<uintptr_t>(value + 0x40000000U) < 0x80000000U;
+ }
};
// Smi constants for 64-bit systems.
@@ -5002,6 +5187,13 @@ template <> struct SmiTagging<8> {
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
}
+ V8_INLINE(static internal::Object* IntToSmi(int value)) {
+ return internal::IntToSmi<kSmiShiftSize>(value);
+ }
+ V8_INLINE(static bool IsValidSmi(intptr_t value)) {
+ // To be representable as a long smi, the value must be a 32-bit integer.
+ return (value == static_cast<int32_t>(value));
+ }
};
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
@@ -5026,7 +5218,7 @@ class Internals {
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 65;
+ static const int kContextEmbedderDataIndex = 64;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -5039,7 +5231,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 118;
+ static const int kEmptyStringRootIndex = 127;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5066,6 +5258,14 @@ class Internals {
return PlatformSmiTagging::SmiToInt(value);
}
+ V8_INLINE(static internal::Object* IntToSmi(int value)) {
+ return PlatformSmiTagging::IntToSmi(value);
+ }
+
+ V8_INLINE(static bool IsValidSmi(intptr_t value)) {
+ return PlatformSmiTagging::IsValidSmi(value);
+ }
+
V8_INLINE(static int GetInstanceType(internal::Object* obj)) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
@@ -5384,6 +5584,7 @@ void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
SetWrapperClassId(Isolate::GetCurrent(), class_id);
}
+
template <class T>
void Persistent<T>::Reset(Isolate* isolate, const Handle<T>& other) {
Dispose(isolate);
@@ -5400,6 +5601,21 @@ void Persistent<T>::Reset(Isolate* isolate, const Handle<T>& other) {
#endif
}
+
+template <class T>
+T* Persistent<T>::ClearAndLeak() {
+ T* old;
+#ifdef V8_USE_UNSAFE_HANDLES
+ old = **this;
+ *this = Persistent<T>();
+#else
+ old = val_;
+ val_ = NULL;
+#endif
+ return old;
+}
+
+
template <class T>
void Persistent<T>::SetWrapperClassId(Isolate* isolate, uint16_t class_id) {
typedef internal::Internals I;
@@ -5425,54 +5641,150 @@ uint16_t Persistent<T>::WrapperClassId(Isolate* isolate) const {
return *reinterpret_cast<uint16_t*>(addr);
}
-Arguments::Arguments(internal::Object** implicit_args,
- internal::Object** values, int length,
- bool is_construct_call)
+
+template<typename T>
+ReturnValue<T>::ReturnValue(internal::Object** slot) : value_(slot) {}
+
+template<typename T>
+void ReturnValue<T>::Set(const Persistent<T>& handle) {
+ *value_ = *reinterpret_cast<internal::Object**>(*handle);
+}
+
+template<typename T>
+void ReturnValue<T>::Set(const Handle<T> handle) {
+ *value_ = *reinterpret_cast<internal::Object**>(*handle);
+}
+
+template<typename T>
+void ReturnValue<T>::Set(Isolate* isolate, double i) {
+ Set(Number::New(isolate, i));
+}
+
+template<typename T>
+void ReturnValue<T>::Set(Isolate* isolate, int32_t i) {
+ typedef internal::Internals I;
+ if (V8_LIKELY(I::IsValidSmi(i))) {
+ *value_ = I::IntToSmi(i);
+ return;
+ }
+ Set(Integer::New(i, isolate));
+}
+
+template<typename T>
+void ReturnValue<T>::Set(Isolate* isolate, uint32_t i) {
+ typedef internal::Internals I;
+ if (V8_LIKELY(I::IsValidSmi(i))) {
+ *value_ = I::IntToSmi(i);
+ return;
+ }
+ Set(Integer::NewFromUnsigned(i, isolate));
+}
+
+template<typename T>
+void ReturnValue<T>::Set(Isolate* isolate, bool value) {
+ if (value) {
+ SetTrue(isolate);
+ } else {
+ SetFalse(isolate);
+ }
+}
+
+template<typename T>
+void ReturnValue<T>::SetTrue(Isolate* isolate) {
+ typedef internal::Internals I;
+ *value_ = *I::GetRoot(isolate, I::kTrueValueRootIndex);
+}
+
+template<typename T>
+void ReturnValue<T>::SetFalse(Isolate* isolate) {
+ typedef internal::Internals I;
+ *value_ = *I::GetRoot(isolate, I::kFalseValueRootIndex);
+}
+
+template<typename T>
+void ReturnValue<T>::SetNull(Isolate* isolate) {
+ typedef internal::Internals I;
+ *value_ = *I::GetRoot(isolate, I::kNullValueRootIndex);
+}
+
+template<typename T>
+void ReturnValue<T>::SetUndefined(Isolate* isolate) {
+ typedef internal::Internals I;
+ *value_ = *I::GetRoot(isolate, I::kUndefinedValueRootIndex);
+}
+
+
+template<typename T>
+FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Object** implicit_args,
+ internal::Object** values,
+ int length,
+ bool is_construct_call)
: implicit_args_(implicit_args),
values_(values),
length_(length),
is_construct_call_(is_construct_call) { }
-Local<Value> Arguments::operator[](int i) const {
+Arguments::Arguments(internal::Object** args,
+ internal::Object** values,
+ int length,
+ bool is_construct_call)
+ : FunctionCallbackInfo<Value>(args, values, length, is_construct_call) { }
+
+
+template<typename T>
+Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
return Local<Value>(reinterpret_cast<Value*>(values_ - i));
}
-Local<Function> Arguments::Callee() const {
+template<typename T>
+Local<Function> FunctionCallbackInfo<T>::Callee() const {
return Local<Function>(reinterpret_cast<Function*>(
&implicit_args_[kCalleeIndex]));
}
-Local<Object> Arguments::This() const {
+template<typename T>
+Local<Object> FunctionCallbackInfo<T>::This() const {
return Local<Object>(reinterpret_cast<Object*>(values_ + 1));
}
-Local<Object> Arguments::Holder() const {
+template<typename T>
+Local<Object> FunctionCallbackInfo<T>::Holder() const {
return Local<Object>(reinterpret_cast<Object*>(
&implicit_args_[kHolderIndex]));
}
-Local<Value> Arguments::Data() const {
+template<typename T>
+Local<Value> FunctionCallbackInfo<T>::Data() const {
return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
}
-Isolate* Arguments::GetIsolate() const {
+template<typename T>
+Isolate* FunctionCallbackInfo<T>::GetIsolate() const {
return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
}
-bool Arguments::IsConstructCall() const {
+template<typename T>
+ReturnValue<T> FunctionCallbackInfo<T>::GetReturnValue() const {
+ return ReturnValue<T>(&implicit_args_[kReturnValueIndex]);
+}
+
+
+template<typename T>
+bool FunctionCallbackInfo<T>::IsConstructCall() const {
return is_construct_call_;
}
-int Arguments::Length() const {
+template<typename T>
+int FunctionCallbackInfo<T>::Length() const {
return length_;
}
@@ -5861,23 +6173,33 @@ External* External::Cast(v8::Value* value) {
}
-Isolate* AccessorInfo::GetIsolate() const {
- return *reinterpret_cast<Isolate**>(&args_[-3]);
+template<typename T>
+Isolate* PropertyCallbackInfo<T>::GetIsolate() const {
+ return *reinterpret_cast<Isolate**>(&args_[kIsolateIndex]);
+}
+
+
+template<typename T>
+Local<Value> PropertyCallbackInfo<T>::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&args_[kDataIndex]));
}
-Local<Value> AccessorInfo::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&args_[-2]));
+template<typename T>
+Local<Object> PropertyCallbackInfo<T>::This() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
}
-Local<Object> AccessorInfo::This() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[0]));
+template<typename T>
+Local<Object> PropertyCallbackInfo<T>::Holder() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[kHolderIndex]));
}
-Local<Object> AccessorInfo::Holder() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[-1]));
+template<typename T>
+ReturnValue<T> PropertyCallbackInfo<T>::GetReturnValue() const {
+ return ReturnValue<T>(&args_[kReturnValueIndex]);
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 8a6eaf4765..7099ca8ddd 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -983,8 +983,12 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
}
-Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
- v8::Handle<Value> data, v8::Handle<Signature> signature, int length) {
+template<typename Callback>
+static Local<FunctionTemplate> FunctionTemplateNew(
+ Callback callback_in,
+ v8::Handle<Value> data,
+ v8::Handle<Signature> signature,
+ int length) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
LOG_API(isolate, "FunctionTemplate::New");
@@ -997,8 +1001,10 @@ Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
int next_serial_number = isolate->next_serial_number();
isolate->set_next_serial_number(next_serial_number + 1);
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
- if (callback != 0) {
+ if (callback_in != 0) {
if (data.IsEmpty()) data = v8::Undefined();
+ InvocationCallback callback =
+ i::CallbackTable::Register(isolate, callback_in);
Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
obj->set_length(length);
@@ -1011,6 +1017,24 @@ Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
}
+Local<FunctionTemplate> FunctionTemplate::New(
+ InvocationCallback callback,
+ v8::Handle<Value> data,
+ v8::Handle<Signature> signature,
+ int length) {
+ return FunctionTemplateNew(callback, data, signature, length);
+}
+
+
+Local<FunctionTemplate> FunctionTemplate::New(
+ FunctionCallback callback,
+ v8::Handle<Value> data,
+ v8::Handle<Signature> signature,
+ int length) {
+ return FunctionTemplateNew(callback, data, signature, length);
+}
+
+
Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
int argc, Handle<FunctionTemplate> argv[]) {
i::Isolate* isolate = i::Isolate::Current();
@@ -1202,9 +1226,11 @@ int TypeSwitch::match(v8::Handle<Value> value) {
} while (false)
-void FunctionTemplate::SetCallHandler(InvocationCallback callback,
- v8::Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+template<typename Callback>
+static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
+ Callback callback,
+ v8::Handle<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(function_template)->GetIsolate();
if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -1215,9 +1241,18 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback,
SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_call_code(*obj);
+ Utils::OpenHandle(function_template)->set_call_code(*obj);
+}
+
+void FunctionTemplate::SetCallHandler(InvocationCallback callback,
+ v8::Handle<Value> data) {
+ FunctionTemplateSetCallHandler(this, callback, data);
}
+void FunctionTemplate::SetCallHandler(FunctionCallback callback,
+ v8::Handle<Value> data) {
+ FunctionTemplateSetCallHandler(this, callback, data);
+}
static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
i::Handle<i::AccessorInfo> obj,
@@ -1237,10 +1272,11 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
}
+template<typename Getter, typename Setter>
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
+ Getter getter_in,
+ Setter setter_in,
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes,
@@ -1248,7 +1284,9 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::ExecutableAccessorInfo> obj =
isolate->factory()->NewExecutableAccessorInfo();
+ AccessorGetter getter = i::CallbackTable::Register(isolate, getter_in);
SET_FIELD_WRAPPED(obj, set_getter, getter);
+ AccessorSetter setter = i::CallbackTable::Register(isolate, setter_in);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
@@ -1259,6 +1297,8 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Handle<String> name,
v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
+ void* setter_ignored,
+ void* data_ignored,
v8::AccessControl settings,
v8::PropertyAttribute attributes,
v8::Handle<AccessorSignature> signature) {
@@ -1323,15 +1363,21 @@ void FunctionTemplate::ReadOnlyPrototype() {
Utils::OpenHandle(this)->set_read_only_prototype(true);
}
-
-void FunctionTemplate::SetNamedInstancePropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
+template<
+ typename Getter,
+ typename Setter,
+ typename Query,
+ typename Deleter,
+ typename Enumerator>
+static void SetNamedInstancePropertyHandler(
+ i::Handle<i::FunctionTemplateInfo> function_template,
+ Getter getter_in,
+ Setter setter_in,
+ Query query_in,
+ Deleter remover_in,
+ Enumerator enumerator_in,
Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::Isolate* isolate = function_template->GetIsolate();
if (IsDeadCheck(isolate,
"v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
return;
@@ -1343,26 +1389,40 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ NamedPropertyGetter getter = i::CallbackTable::Register(isolate, getter_in);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ NamedPropertySetter setter = i::CallbackTable::Register(isolate, setter_in);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ NamedPropertyQuery query = i::CallbackTable::Register(isolate, query_in);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ NamedPropertyDeleter remover =
+ i::CallbackTable::Register(isolate, remover_in);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ NamedPropertyEnumerator enumerator =
+ i::CallbackTable::Register(isolate, enumerator_in);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_named_property_handler(*obj);
-}
-
-
-void FunctionTemplate::SetIndexedInstancePropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
+ function_template->set_named_property_handler(*obj);
+}
+
+
+template<
+ typename Getter,
+ typename Setter,
+ typename Query,
+ typename Deleter,
+ typename Enumerator>
+static void SetIndexedInstancePropertyHandler(
+ i::Handle<i::FunctionTemplateInfo> function_template,
+ Getter getter_in,
+ Setter setter_in,
+ Query query_in,
+ Deleter remover_in,
+ Enumerator enumerator_in,
Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::Isolate* isolate = function_template->GetIsolate();
if (IsDeadCheck(isolate,
"v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
return;
@@ -1374,22 +1434,33 @@ void FunctionTemplate::SetIndexedInstancePropertyHandler(
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ IndexedPropertyGetter getter =
+ i::CallbackTable::Register(isolate, getter_in);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ IndexedPropertySetter setter =
+ i::CallbackTable::Register(isolate, setter_in);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ IndexedPropertyQuery query = i::CallbackTable::Register(isolate, query_in);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ IndexedPropertyDeleter remover =
+ i::CallbackTable::Register(isolate, remover_in);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ IndexedPropertyEnumerator enumerator =
+ i::CallbackTable::Register(isolate, enumerator_in);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
+ function_template->set_indexed_property_handler(*obj);
}
-void FunctionTemplate::SetInstanceCallAsFunctionHandler(
- InvocationCallback callback,
+template<typename Callback>
+static void SetInstanceCallAsFunctionHandler(
+ i::Handle<i::FunctionTemplateInfo> function_template,
+ Callback callback_in,
Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::Isolate* isolate = function_template->GetIsolate();
if (IsDeadCheck(isolate,
"v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
return;
@@ -1400,10 +1471,12 @@ void FunctionTemplate::SetInstanceCallAsFunctionHandler(
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ InvocationCallback callback =
+ i::CallbackTable::Register(isolate, callback_in);
SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_instance_call_handler(*obj);
+ function_template->set_instance_call_handler(*obj);
}
@@ -1461,6 +1534,32 @@ static inline void AddPropertyToFunctionTemplate(
}
+template<typename Setter, typename Getter, typename Data>
+static bool ObjectTemplateSetAccessor(
+ ObjectTemplate* object_template,
+ v8::Handle<String> name,
+ Getter getter,
+ Setter setter,
+ Data data,
+ AccessControl settings,
+ PropertyAttribute attribute,
+ v8::Handle<AccessorSignature> signature) {
+ i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ EnsureConstructor(object_template);
+ i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
+ Utils::OpenHandle(object_template)->constructor());
+ i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
+ name, getter, setter, data, settings, attribute, signature);
+ if (obj.is_null()) return false;
+ AddPropertyToFunctionTemplate(cons, obj);
+ return true;
+}
+
+
void ObjectTemplate::SetAccessor(v8::Handle<String> name,
AccessorGetter getter,
AccessorSetter setter,
@@ -1468,64 +1567,89 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
AccessControl settings,
PropertyAttribute attribute,
v8::Handle<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data,
- settings, attribute,
- signature);
- AddPropertyToFunctionTemplate(cons, obj);
+ ObjectTemplateSetAccessor(
+ this, name, getter, setter, data, settings, attribute, signature);
}
-bool ObjectTemplate::SetAccessor(Handle<String> name,
- Handle<DeclaredAccessorDescriptor> descriptor,
+void ObjectTemplate::SetAccessor(v8::Handle<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attribute,
- Handle<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
- name, descriptor, settings, attribute, signature);
- if (obj.is_null()) return false;
- AddPropertyToFunctionTemplate(cons, obj);
- return true;
+ v8::Handle<AccessorSignature> signature) {
+ ObjectTemplateSetAccessor(
+ this, name, getter, setter, data, settings, attribute, signature);
}
-void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+bool ObjectTemplate::SetAccessor(Handle<String> name,
+ Handle<DeclaredAccessorDescriptor> descriptor,
+ AccessControl settings,
+ PropertyAttribute attribute,
+ Handle<AccessorSignature> signature) {
+ void* null = NULL;
+ return ObjectTemplateSetAccessor(
+ this, name, descriptor, null, null, settings, attribute, signature);
+}
+
+
+template<
+ typename Getter,
+ typename Setter,
+ typename Query,
+ typename Deleter,
+ typename Enumerator>
+static void ObjectTemplateSetNamedPropertyHandler(
+ ObjectTemplate* object_template,
+ Getter getter,
+ Setter setter,
+ Query query,
+ Deleter remover,
+ Enumerator enumerator,
+ Handle<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ EnsureConstructor(object_template);
+ i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
+ Utils::OpenHandle(object_template)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
+ SetNamedInstancePropertyHandler(cons,
+ getter,
+ setter,
+ query,
+ remover,
+ enumerator,
+ data);
+}
+
+
+void ObjectTemplate::SetNamedPropertyHandler(
+ NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data) {
+ ObjectTemplateSetNamedPropertyHandler(
+ this, getter, setter, query, remover, enumerator, data);
+}
+
+
+void ObjectTemplate::SetNamedPropertyHandler(
+ NamedPropertyGetterCallback getter,
+ NamedPropertySetterCallback setter,
+ NamedPropertyQueryCallback query,
+ NamedPropertyDeleterCallback remover,
+ NamedPropertyEnumeratorCallback enumerator,
+ Handle<Value> data) {
+ ObjectTemplateSetNamedPropertyHandler(
+ this, getter, setter, query, remover, enumerator, data);
}
@@ -1574,46 +1698,93 @@ void ObjectTemplate::SetAccessCheckCallbacks(
}
-void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
+template<
+ typename Getter,
+ typename Setter,
+ typename Query,
+ typename Deleter,
+ typename Enumerator>
+void ObjectTemplateSetIndexedPropertyHandler(
+ ObjectTemplate* object_template,
+ Getter getter,
+ Setter setter,
+ Query query,
+ Deleter remover,
+ Enumerator enumerator,
Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ EnsureConstructor(object_template);
+ i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
+ Utils::OpenHandle(object_template)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
- setter,
- query,
- remover,
- enumerator,
- data);
+ SetIndexedInstancePropertyHandler(cons,
+ getter,
+ setter,
+ query,
+ remover,
+ enumerator,
+ data);
}
-void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+void ObjectTemplate::SetIndexedPropertyHandler(
+ IndexedPropertyGetter getter,
+ IndexedPropertySetter setter,
+ IndexedPropertyQuery query,
+ IndexedPropertyDeleter remover,
+ IndexedPropertyEnumerator enumerator,
+ Handle<Value> data) {
+ ObjectTemplateSetIndexedPropertyHandler(
+ this, getter, setter, query, remover, enumerator, data);
+}
+
+
+void ObjectTemplate::SetIndexedPropertyHandler(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyQueryCallback query,
+ IndexedPropertyDeleterCallback remover,
+ IndexedPropertyEnumeratorCallback enumerator,
+ Handle<Value> data) {
+ ObjectTemplateSetIndexedPropertyHandler(
+ this, getter, setter, query, remover, enumerator, data);
+}
+
+
+template<typename Callback>
+static void ObjectTemplateSetCallAsFunctionHandler(
+ ObjectTemplate* object_template,
+ Callback callback,
+ Handle<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
if (IsDeadCheck(isolate,
"v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
+ EnsureConstructor(object_template);
+ i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
+ Utils::OpenHandle(object_template)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- Utils::ToLocal(cons)->SetInstanceCallAsFunctionHandler(callback, data);
+ SetInstanceCallAsFunctionHandler(cons, callback, data);
+}
+
+
+void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
+ Handle<Value> data) {
+ return ObjectTemplateSetCallAsFunctionHandler(this, callback, data);
+}
+
+
+void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
+ Handle<Value> data) {
+ return ObjectTemplateSetCallAsFunctionHandler(this, callback, data);
}
@@ -3446,7 +3617,21 @@ bool v8::Object::Has(uint32_t index) {
}
-static inline bool SetAccessor(Object* obj, i::Handle<i::AccessorInfo> info) {
+template<typename Setter, typename Getter, typename Data>
+static inline bool ObjectSetAccessor(Object* obj,
+ Handle<String> name,
+ Setter getter,
+ Getter setter,
+ Data data,
+ AccessControl settings,
+ PropertyAttribute attributes) {
+ i::Isolate* isolate = Utils::OpenHandle(obj)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ v8::Handle<AccessorSignature> signature;
+ i::Handle<i::AccessorInfo> info = MakeAccessorInfo(
+ name, getter, setter, data, settings, attributes, signature);
if (info.is_null()) return false;
bool fast = Utils::OpenHandle(obj)->HasFastProperties();
i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(obj), info);
@@ -3462,15 +3647,19 @@ bool Object::SetAccessor(Handle<String> name,
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attributes) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- v8::Handle<AccessorSignature> signature;
- i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name, getter, setter, data,
- settings, attributes,
- signature);
- return v8::SetAccessor(this, info);
+ return ObjectSetAccessor(
+ this, name, getter, setter, data, settings, attributes);
+}
+
+
+bool Object::SetAccessor(Handle<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ v8::Handle<Value> data,
+ AccessControl settings,
+ PropertyAttribute attributes) {
+ return ObjectSetAccessor(
+ this, name, getter, setter, data, settings, attributes);
}
@@ -3478,14 +3667,9 @@ bool Object::SetAccessor(Handle<String> name,
Handle<DeclaredAccessorDescriptor> descriptor,
AccessControl settings,
PropertyAttribute attributes) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- v8::Handle<AccessorSignature> signature;
- i::Handle<i::AccessorInfo> info = MakeAccessorInfo(
- name, descriptor, settings, attributes, signature);
- return v8::SetAccessor(this, info);
+ void* null = NULL;
+ return ObjectSetAccessor(
+ this, name, descriptor, null, null, settings, attributes);
}
@@ -5953,10 +6137,6 @@ i::Handle<i::JSTypedArray> NewTypedArray(
isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- i::Handle<i::Map> map =
- isolate->factory()->GetElementsTransitionMap(
- obj, elements_kind);
- obj->set_map(*map);
obj->set_elements(*elements);
return obj;
}
@@ -6027,12 +6207,19 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
Local<Number> v8::Number::New(double value) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Number::New()");
+ return Number::New(reinterpret_cast<Isolate*>(isolate), value);
+}
+
+
+Local<Number> v8::Number::New(Isolate* isolate, double value) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ASSERT(internal_isolate->IsInitialized());
if (std::isnan(value)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
value = i::OS::nan_value();
}
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
+ ENTER_V8(internal_isolate);
+ i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::NumberToLocal(result);
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 686abf75c4..12d6e3d082 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -149,12 +149,10 @@ class RegisteredExtension {
static void UnregisterAll();
Extension* extension() { return extension_; }
RegisteredExtension* next() { return next_; }
- RegisteredExtension* next_auto() { return next_auto_; }
static RegisteredExtension* first_extension() { return first_extension_; }
private:
Extension* extension_;
RegisteredExtension* next_;
- RegisteredExtension* next_auto_;
static RegisteredExtension* first_extension_;
};
diff --git a/deps/v8/src/apiutils.h b/deps/v8/src/apiutils.h
index 9831f08669..0765585649 100644
--- a/deps/v8/src/apiutils.h
+++ b/deps/v8/src/apiutils.h
@@ -39,31 +39,6 @@ class ImplementationUtilities {
return that->names_;
}
- // Packs additional parameters for the NewArguments function. |implicit_args|
- // is a pointer to the last element of 4-elements array controlled by GC.
- static void PrepareArgumentsData(internal::Object** implicit_args,
- internal::Isolate* isolate,
- internal::Object* data,
- internal::JSFunction* callee,
- internal::Object* holder) {
- implicit_args[v8::Arguments::kDataIndex] = data;
- implicit_args[v8::Arguments::kCalleeIndex] = callee;
- implicit_args[v8::Arguments::kHolderIndex] = holder;
- implicit_args[v8::Arguments::kIsolateIndex] =
- reinterpret_cast<internal::Object*>(isolate);
- }
-
- static v8::Arguments NewArguments(internal::Object** implicit_args,
- internal::Object** argv, int argc,
- bool is_construct_call) {
- ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction());
- ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject());
- // The implicit isolate argument is not tagged and looks like a SMI.
- ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi());
-
- return v8::Arguments(implicit_args, argv, argc, is_construct_call);
- }
-
// Introduce an alias for the handle scope data to allow non-friends
// to access the HandleScope data.
typedef v8::HandleScope::Data HandleScopeData;
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
new file mode 100644
index 0000000000..091d0b92a4
--- /dev/null
+++ b/deps/v8/src/arguments.cc
@@ -0,0 +1,195 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "arguments.h"
+
+namespace v8 {
+namespace internal {
+
+
+static bool Match(void* a, void* b) {
+ return a == b;
+}
+
+
+static uint32_t Hash(void* function) {
+ uintptr_t as_int = reinterpret_cast<uintptr_t>(function);
+ if (sizeof(function) == 4) return static_cast<uint32_t>(as_int);
+ uint64_t as_64 = static_cast<uint64_t>(as_int);
+ return
+ static_cast<uint32_t>(as_64 >> 32) ^
+ static_cast<uint32_t>(as_64);
+}
+
+
+CallbackTable::CallbackTable(): map_(Match, 64) {}
+
+
+bool CallbackTable::Contains(void* function) {
+ ASSERT(function != NULL);
+ return map_.Lookup(function, Hash(function), false) != NULL;
+}
+
+
+void CallbackTable::InsertCallback(Isolate* isolate,
+ void* function,
+ bool returns_void) {
+ if (function == NULL) return;
+ // Don't store for performance.
+ if (kStoreVoidFunctions != returns_void) return;
+ CallbackTable* table = isolate->callback_table();
+ if (table == NULL) {
+ table = new CallbackTable();
+ isolate->set_callback_table(table);
+ }
+ typedef HashMap::Entry Entry;
+ Entry* entry = table->map_.Lookup(function, Hash(function), true);
+ ASSERT(entry != NULL);
+ ASSERT(entry->value == NULL || entry->value == function);
+ entry->value = function;
+}
+
+
+template<typename T>
+template<typename V>
+v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
+ // Check the ReturnValue.
+ Object** handle = &this->end()[kReturnValueOffset];
+ // Nothing was set, return empty handle as per previous behaviour.
+ if ((*handle)->IsTheHole()) return v8::Handle<V>();
+ return v8::Handle<V>(reinterpret_cast<V*>(handle));
+}
+
+
+v8::Handle<v8::Value> FunctionCallbackArguments::Call(InvocationCallback f) {
+ Isolate* isolate = this->isolate();
+ void* f_as_void = CallbackTable::FunctionToVoidPtr(f);
+ bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);
+ if (new_style) {
+ FunctionCallback c = reinterpret_cast<FunctionCallback>(f);
+ FunctionCallbackInfo<v8::Value> info(end(),
+ argv_,
+ argc_,
+ is_construct_call_);
+ c(info);
+ } else {
+ v8::Arguments args(end(),
+ argv_,
+ argc_,
+ is_construct_call_);
+ v8::Handle<v8::Value> return_value = f(args);
+ if (!return_value.IsEmpty()) return return_value;
+ }
+ return GetReturnValue<v8::Value>(isolate);
+}
+
+
+#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f) { \
+ Isolate* isolate = this->isolate(); \
+ void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
+ bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
+ if (new_style) { \
+ NewFunction c = reinterpret_cast<NewFunction>(f); \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ c(info); \
+ } else { \
+ v8::AccessorInfo info(end()); \
+ v8::Handle<ReturnValue> return_value = f(info); \
+ if (!return_value.IsEmpty()) return return_value; \
+ } \
+ return GetReturnValue<ReturnValue>(isolate); \
+}
+
+#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
+ Arg1 arg1) { \
+ Isolate* isolate = this->isolate(); \
+ void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
+ bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
+ if (new_style) { \
+ NewFunction c = reinterpret_cast<NewFunction>(f); \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ c(arg1, info); \
+ } else { \
+ v8::AccessorInfo info(end()); \
+ v8::Handle<ReturnValue> return_value = f(arg1, info); \
+ if (!return_value.IsEmpty()) return return_value; \
+ } \
+ return GetReturnValue<ReturnValue>(isolate); \
+}
+
+#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
+ Arg1 arg1, \
+ Arg2 arg2) { \
+ Isolate* isolate = this->isolate(); \
+ void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
+ bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
+ if (new_style) { \
+ NewFunction c = reinterpret_cast<NewFunction>(f); \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ c(arg1, arg2, info); \
+ } else { \
+ v8::AccessorInfo info(end()); \
+ v8::Handle<ReturnValue> return_value = f(arg1, arg2, info); \
+ if (!return_value.IsEmpty()) return return_value; \
+ } \
+ return GetReturnValue<ReturnValue>(isolate); \
+}
+
+#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
+void PropertyCallbackArguments::Call(OldFunction f, \
+ Arg1 arg1, \
+ Arg2 arg2) { \
+ Isolate* isolate = this->isolate(); \
+ void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
+ bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
+ if (new_style) { \
+ NewFunction c = reinterpret_cast<NewFunction>(f); \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ c(arg1, arg2, info); \
+ } else { \
+ v8::AccessorInfo info(end()); \
+ f(arg1, arg2, info); \
+ } \
+}
+
+FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
+FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
+FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
+FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
+
+#undef WRITE_CALL_0
+#undef WRITE_CALL_1
+#undef WRITE_CALL_2
+#undef WRITE_CALL_2_VOID
+
+
+} } // namespace v8::internal
+
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 1423d5642b..a80b613615 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -82,35 +82,258 @@ class Arguments BASE_EMBEDDED {
};
+// mappings from old property callbacks to new ones
+// F(old name, new name, return value, parameters...)
+//
+// These aren't included in the list as they have duplicate signatures
+// F(NamedPropertyEnumerator, NamedPropertyEnumeratorCallback, ...)
+// F(NamedPropertyGetter, NamedPropertyGetterCallback, ...)
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
+ F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback, v8::Array) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
+ F(AccessorGetter, AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
+ F(NamedPropertyQuery, \
+ NamedPropertyQueryCallback, \
+ v8::Integer, \
+ v8::Local<v8::String>) \
+ F(NamedPropertyDeleter, \
+ NamedPropertyDeleterCallback, \
+ v8::Boolean, \
+ v8::Local<v8::String>) \
+ F(IndexedPropertyGetter, \
+ IndexedPropertyGetterCallback, \
+ v8::Value, \
+ uint32_t) \
+ F(IndexedPropertyQuery, \
+ IndexedPropertyQueryCallback, \
+ v8::Integer, \
+ uint32_t) \
+ F(IndexedPropertyDeleter, \
+ IndexedPropertyDeleterCallback, \
+ v8::Boolean, \
+ uint32_t) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
+ F(NamedPropertySetter, \
+ NamedPropertySetterCallback, \
+ v8::Value, \
+ v8::Local<v8::String>, \
+ v8::Local<v8::Value>) \
+ F(IndexedPropertySetter, \
+ IndexedPropertySetterCallback, \
+ v8::Value, \
+ uint32_t, \
+ v8::Local<v8::Value>) \
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
+ F(AccessorSetter, \
+ AccessorSetterCallback, \
+ void, \
+ v8::Local<v8::String>, \
+ v8::Local<v8::Value>) \
+
+// All property callbacks as well as invocation callbacks
+#define FOR_EACH_CALLBACK_TABLE_MAPPING(F) \
+ F(InvocationCallback, FunctionCallback) \
+ F(AccessorGetter, AccessorGetterCallback) \
+ F(AccessorSetter, AccessorSetterCallback) \
+ F(NamedPropertySetter, NamedPropertySetterCallback) \
+ F(NamedPropertyQuery, NamedPropertyQueryCallback) \
+ F(NamedPropertyDeleter, NamedPropertyDeleterCallback) \
+ F(IndexedPropertyGetter, IndexedPropertyGetterCallback) \
+ F(IndexedPropertySetter, IndexedPropertySetterCallback) \
+ F(IndexedPropertyQuery, IndexedPropertyQueryCallback) \
+ F(IndexedPropertyDeleter, IndexedPropertyDeleterCallback) \
+ F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback) \
+
+
+// TODO(dcarney): Remove this class when old callbacks are gone.
+class CallbackTable {
+ public:
+ // TODO(dcarney): Flip this when it makes sense for performance.
+ static const bool kStoreVoidFunctions = true;
+ static inline bool ReturnsVoid(Isolate* isolate, void* function) {
+ CallbackTable* table = isolate->callback_table();
+ bool contains =
+ table != NULL &&
+ table->map_.occupancy() != 0 &&
+ table->Contains(function);
+ return contains == kStoreVoidFunctions;
+ }
+
+ STATIC_ASSERT(sizeof(intptr_t) == sizeof(AccessorGetterCallback));
+
+ template<typename F>
+ static inline void* FunctionToVoidPtr(F function) {
+ return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(function));
+ }
+
+#define WRITE_REGISTER(OldFunction, NewFunction) \
+ static OldFunction Register(Isolate* isolate, NewFunction f) { \
+ InsertCallback(isolate, FunctionToVoidPtr(f), true); \
+ return reinterpret_cast<OldFunction>(f); \
+ } \
+ \
+ static OldFunction Register(Isolate* isolate, OldFunction f) { \
+ InsertCallback(isolate, FunctionToVoidPtr(f), false); \
+ return f; \
+ }
+ FOR_EACH_CALLBACK_TABLE_MAPPING(WRITE_REGISTER)
+#undef WRITE_REGISTER
+
+ private:
+ CallbackTable();
+ bool Contains(void* function);
+ static void InsertCallback(Isolate* isolate,
+ void* function,
+ bool returns_void);
+ HashMap map_;
+ DISALLOW_COPY_AND_ASSIGN(CallbackTable);
+};
+
+
// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
// can.
-class CustomArguments : public Relocatable {
+template<int kArrayLength>
+class CustomArgumentsBase : public Relocatable {
+ public:
+ virtual inline void IterateInstance(ObjectVisitor* v) {
+ v->VisitPointers(values_, values_ + kArrayLength);
+ }
+ protected:
+ inline Object** end() { return values_ + kArrayLength - 1; }
+ explicit inline CustomArgumentsBase(Isolate* isolate)
+ : Relocatable(isolate) {}
+ Object* values_[kArrayLength];
+};
+
+
+template<typename T>
+class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
public:
- inline CustomArguments(Isolate* isolate,
- Object* data,
- Object* self,
- JSObject* holder) : Relocatable(isolate) {
- ASSERT(reinterpret_cast<Object*>(isolate)->IsSmi());
- values_[3] = self;
- values_[2] = holder;
- values_[1] = data;
- values_[0] = reinterpret_cast<Object*>(isolate);
+ static const int kReturnValueOffset = T::kReturnValueIndex;
+
+ typedef CustomArgumentsBase<T::kArgsLength> Super;
+ ~CustomArguments() {
+ // TODO(dcarney): create a new zap value for this.
+ this->end()[kReturnValueOffset] =
+ reinterpret_cast<Object*>(kHandleZapValue);
+ }
+
+ protected:
+ explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
+
+ template<typename V>
+ v8::Handle<V> GetReturnValue(Isolate* isolate);
+
+ inline Isolate* isolate() {
+ return reinterpret_cast<Isolate*>(this->end()[T::kIsolateIndex]);
}
+};
+
+
+class PropertyCallbackArguments
+ : public CustomArguments<PropertyCallbackInfo<Value> > {
+ public:
+ typedef PropertyCallbackInfo<Value> T;
+ typedef CustomArguments<T> Super;
+ static const int kArgsLength = T::kArgsLength;
+ static const int kThisIndex = T::kThisIndex;
+ static const int kHolderIndex = T::kHolderIndex;
+
+ PropertyCallbackArguments(Isolate* isolate,
+ Object* data,
+ Object* self,
+ JSObject* holder)
+ : Super(isolate) {
+ Object** values = this->end();
+ values[T::kThisIndex] = self;
+ values[T::kHolderIndex] = holder;
+ values[T::kDataIndex] = data;
+ values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+ values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+ ASSERT(values[T::kHolderIndex]->IsHeapObject());
+ ASSERT(values[T::kIsolateIndex]->IsSmi());
+ }
+
+ /*
+ * The following Call functions wrap the calling of all callbacks to handle
+ * calling either the old or the new style callbacks depending on which one
+ * has been registered.
+ * For old callbacks which return an empty handle, the ReturnValue is checked
+ * and used if it's been set to anything inside the callback.
+ * New style callbacks always use the return value.
+ */
+#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \
+ v8::Handle<ReturnValue> Call(OldFunction f); \
+
+#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \
+ v8::Handle<ReturnValue> Call(OldFunction f, Arg1 arg1); \
+
+#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
+ v8::Handle<ReturnValue> Call(OldFunction f, Arg1 arg1, Arg2 arg2); \
+
+#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
+ void Call(OldFunction f, Arg1 arg1, Arg2 arg2); \
+
+FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
+FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
+FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
+FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
+
+#undef WRITE_CALL_0
+#undef WRITE_CALL_1
+#undef WRITE_CALL_2
+#undef WRITE_CALL_2_VOID
+};
+
+
+class FunctionCallbackArguments
+ : public CustomArguments<FunctionCallbackInfo<Value> > {
+ public:
+ typedef FunctionCallbackInfo<Value> T;
+ typedef CustomArguments<T> Super;
+ static const int kArgsLength = T::kArgsLength;
- inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
-#ifdef DEBUG
- for (size_t i = 0; i < ARRAY_SIZE(values_); i++) {
- values_[i] = reinterpret_cast<Object*>(kZapValue);
- }
-#endif
+ FunctionCallbackArguments(internal::Isolate* isolate,
+ internal::Object* data,
+ internal::JSFunction* callee,
+ internal::Object* holder,
+ internal::Object** argv,
+ int argc,
+ bool is_construct_call)
+ : Super(isolate),
+ argv_(argv),
+ argc_(argc),
+ is_construct_call_(is_construct_call) {
+ Object** values = end();
+ values[T::kDataIndex] = data;
+ values[T::kCalleeIndex] = callee;
+ values[T::kHolderIndex] = holder;
+ values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
+ values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+ ASSERT(values[T::kCalleeIndex]->IsJSFunction());
+ ASSERT(values[T::kHolderIndex]->IsHeapObject());
+ ASSERT(values[T::kIsolateIndex]->IsSmi());
}
- void IterateInstance(ObjectVisitor* v);
- Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
+ /*
+ * The following Call function wraps the calling of all callbacks to handle
+ * calling either the old or the new style callbacks depending on which one
+ * has been registered.
+ * For old callbacks which return an empty handle, the ReturnValue is checked
+ * and used if it's been set to anything inside the callback.
+ * New style callbacks always use the return value.
+ */
+ v8::Handle<v8::Value> Call(InvocationCallback f);
private:
- Object* values_[4];
+ internal::Object** argv_;
+ int argc_;
+ bool is_construct_call_;
};
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index b39d9ee122..0102f337bf 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -2473,6 +2473,23 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
}
+void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
+ int fraction_bits,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-874.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
+ // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
+ ASSERT(fraction_bits > 0 && fraction_bits <= 32);
+ ASSERT(CpuFeatures::IsSupported(VFP3));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int i = ((32 - fraction_bits) >> 4) & 1;
+ int imm4 = (32 - fraction_bits) & 0xf;
+ emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
+ vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
+}
+
+
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
@@ -3000,7 +3017,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Put down constant pool marker "Undefined instruction".
// The data size helps disassembly know what to print.
- emit(kConstantPoolMarker | EncodeConstantPoolLength(size_after_marker));
+ emit(kConstantPoolMarker |
+ EncodeConstantPoolLength(size_after_marker / kPointerSize));
if (require_64_bit_align) {
emit(kConstantPoolMarker);
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 0fd5186734..3000860ba4 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -459,6 +459,17 @@ class Operand BASE_EMBEDDED {
// rm <shift_op> shift_imm
explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+ INLINE(static Operand SmiUntag(Register rm)) {
+ return Operand(rm, ASR, kSmiTagSize);
+ }
+ INLINE(static Operand PointerOffsetFromSmiKey(Register key)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
+ }
+ INLINE(static Operand DoubleOffsetFromSmiKey(Register key)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
+ return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
+ }
// rm <shift_op> rs
explicit Operand(Register rm, ShiftOp shift_op, Register rs);
@@ -515,6 +526,12 @@ class MemOperand BASE_EMBEDDED {
// [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
explicit MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+ INLINE(static MemOperand PointerAddressFromSmiKey(Register array,
+ Register key,
+ AddrMode am = Offset)) {
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
+ }
void set_offset(int32_t offset) {
ASSERT(rm_.is(no_reg));
@@ -1032,6 +1049,9 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
+ void vcvt_f64_s32(const DwVfpRegister dst,
+ int fraction_bits,
+ const Condition cond = al);
void vneg(const DwVfpRegister dst,
const DwVfpRegister src,
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 3cc2797e94..6333924ca0 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -215,12 +215,9 @@ static void AllocateJSArray(MacroAssembler* masm,
// Allocate the JSArray object together with space for a FixedArray with the
// requested number of elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ mov(elements_array_end,
Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
- __ add(elements_array_end,
- elements_array_end,
- Operand(array_size, ASR, kSmiTagSize));
+ __ add(elements_array_end, elements_array_end, Operand::SmiUntag(array_size));
__ Allocate(elements_array_end,
result,
scratch1,
@@ -249,7 +246,6 @@ static void AllocateJSArray(MacroAssembler* masm,
FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- STATIC_ASSERT(kSmiTag == 0);
__ sub(elements_array_storage,
elements_array_storage,
Operand(kHeapObjectTag));
@@ -261,7 +257,6 @@ static void AllocateJSArray(MacroAssembler* masm,
__ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
@@ -270,10 +265,9 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array_storage: elements array element storage
// array_size: smi-tagged size of elements array
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(elements_array_end,
elements_array_storage,
- Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
+ Operand::PointerOffsetFromSmiKey(array_size));
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@@ -335,7 +329,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
__ bind(&argc_one_or_more);
__ cmp(r0, Operand(1));
__ b(ne, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
__ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
__ tst(r2, r2);
__ b(ne, &not_empty_array);
@@ -344,6 +337,7 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
__ b(&empty_array);
__ bind(&not_empty_array);
+ STATIC_ASSERT(kSmiTag == 0);
__ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
__ b(ne, call_generic_code);
@@ -375,7 +369,7 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
- __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convet argc to a smi.
+ __ SmiTag(r2, r0);
// r0: argc
// r1: constructor
@@ -478,7 +472,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
+ __ SmiTst(r2);
__ Assert(ne, "Unexpected initial map for InternalArray function");
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for InternalArray function");
@@ -512,7 +506,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
+ __ SmiTst(r2);
__ Assert(ne, "Unexpected initial map for Array function");
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for Array function");
@@ -545,7 +539,7 @@ void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r3, Operand(kSmiTagMask));
+ __ SmiTst(r3);
__ Assert(ne, "Unexpected initial map for Array function");
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for Array function");
@@ -778,7 +772,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ SmiTag(r0);
__ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function.
@@ -931,7 +925,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ SmiTag(r0, r3);
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
// Initialize the fields to undefined.
@@ -1004,7 +998,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Set up number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+ __ SmiUntag(r0, r3);
// Copy arguments and receiver to the expression stack.
// r0: number of arguments
@@ -1340,6 +1334,11 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
@@ -1454,7 +1453,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
+ __ SmiTag(r0);
__ push(r0);
__ push(r2);
@@ -1462,7 +1461,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(r2, r0);
__ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ SmiUntag(r0);
// Exit the internal frame.
}
@@ -1565,7 +1564,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+ __ SmiUntag(r2);
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r2, r0); // Check formal and actual parameter counts.
@@ -1604,7 +1603,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// here which will cause r2 to become negative.
__ sub(r2, sp, r2);
// Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
@@ -1714,7 +1713,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Invoke the function.
Label call_proxy;
ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ SmiUntag(r0);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &call_proxy);
@@ -1743,7 +1742,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(3 * kPointerSize));
@@ -1759,7 +1758,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ ldr(r1, MemOperand(fp, -3 * kPointerSize));
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
@@ -1790,7 +1789,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// adjust for return address and receiver
__ add(r0, r0, Operand(2 * kPointerSize));
__ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
@@ -1821,7 +1820,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// Copy the arguments (including the receiver) to the new stack frame.
// r0: copy start address
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 86da76ac3c..c667c90721 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -307,8 +307,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// The optimized code map must never be empty, so check the first elements.
Label install_optimized;
// Speculatively move code object into r4.
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
- __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+ __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
+ __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
__ cmp(r2, r5);
__ b(eq, &install_optimized);
@@ -317,19 +317,17 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
__ bind(&loop);
// Do not double check first entry.
-
- __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
__ b(eq, &install_unoptimized);
- __ sub(r4, r4, Operand(
- Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
__ ldr(r5, MemOperand(r5));
__ cmp(r2, r5);
__ b(ne, &loop);
// Hit: fetch the optimized code.
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
__ add(r5, r5, Operand(kPointerSize));
__ ldr(r4, MemOperand(r5));
@@ -521,8 +519,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
Register mantissa = result2_;
Label not_special;
- // Convert from Smi to integer.
- __ mov(source_, Operand(source_, ASR, kSmiTagSize));
+ __ SmiUntag(source_);
// Move sign bit from source to destination. This works because the sign bit
// in the exponent word of the double has the same position and polarity as
// the 2's complement sign bit in a Smi.
@@ -772,7 +769,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Lhs is a smi, rhs is a number.
// Convert lhs to a double in d7.
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
+ __ SmiToDouble(d7, lhs);
// Load the double from rhs, tagged HeapNumber r0, to d6.
__ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
@@ -803,7 +800,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ sub(r7, lhs, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
// Convert rhs to a double in d6 .
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
+ __ SmiToDouble(d6, rhs);
// Fall through to both_loaded_as_doubles.
}
@@ -1230,7 +1227,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
if (types_.Contains(SMI)) {
// Smis: 0 -> false, all other -> true
- __ tst(tos_, Operand(kSmiTagMask));
+ __ SmiTst(tos_);
// tos_ contains the correct return value already
__ Ret(eq);
} else if (types_.NeedsMap()) {
@@ -1535,7 +1532,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
__ b(mi, &try_float);
// Tag the result as a smi and we're done.
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ SmiTag(r0, r1);
__ Ret();
// Try to store the result in a heap number.
@@ -1882,9 +1879,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
// Check that the signed result fits in a Smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &not_smi_result);
- __ SmiTag(right, scratch1);
+ __ TrySmiTag(right, scratch1, &not_smi_result);
__ Ret();
break;
default:
@@ -1946,12 +1941,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Load left and right operands into d0 and d1.
if (smi_operands) {
- __ SmiUntag(scratch1, right);
- __ vmov(d1.high(), scratch1);
- __ vcvt_f64_s32(d1, d1.high());
- __ SmiUntag(scratch1, left);
- __ vmov(d0.high(), scratch1);
- __ vcvt_f64_s32(d0, d0.high());
+ __ SmiToDouble(d1, right);
+ __ SmiToDouble(d0, left);
} else {
// Load right operand into d1.
if (right_type == BinaryOpIC::INT32) {
@@ -2062,9 +2053,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
}
// Check that the *signed* result fits in a smi.
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ SmiTag(r0, r2);
+ __ TrySmiTag(r0, r2, &result_not_a_smi);
__ Ret();
// Allocate new heap number for result.
@@ -2124,7 +2113,6 @@ void BinaryOpStub_GenerateSmiCode(
// Perform combined smi check on both operands.
__ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
@@ -2162,8 +2150,12 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -2188,7 +2180,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub string_add_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
@@ -2408,12 +2401,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
UNREACHABLE();
}
- // Check if the result fits in a smi.
- __ add(scratch1, r2, Operand(0x40000000), SetCC);
- // If not try to return a heap number. (We know the result is an int32.)
- __ b(mi, &return_heap_number);
- // Tag the result and return.
- __ SmiTag(r0, r2);
+ // Check if the result fits in a smi. If not try to return a heap number.
+ // (We know the result is an int32).
+ __ TrySmiTag(r0, r2, &return_heap_number);
__ Ret();
__ bind(&return_heap_number);
@@ -2459,8 +2449,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -2507,8 +2501,12 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -2531,8 +2529,12 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -2548,7 +2550,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &left_not_string);
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ StringAddStub string_add_left_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
@@ -2558,7 +2561,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ StringAddStub string_add_right_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
@@ -2627,7 +2631,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Input is a smi. Convert to double and load the low and high words
// of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ SmiToDouble(d7, r0);
+ __ vmov(r2, r3, d7);
__ b(&loaded);
__ bind(&input_not_smi);
@@ -3825,7 +3830,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Read the argument from the stack and return it.
__ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r0, MemOperand(r3, kDisplacement));
__ Jump(lr);
@@ -3839,7 +3844,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Read the argument from the adaptor frame and return it.
__ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r0, MemOperand(r3, kDisplacement));
__ Jump(lr);
@@ -4092,7 +4097,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
__ str(r3, MemOperand(sp, 1 * kPointerSize));
@@ -4100,9 +4105,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ cmp(r1, Operand::Zero());
+ __ SmiUntag(r1, SetCC);
__ b(eq, &add_arguments_object);
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
__ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
@@ -4141,8 +4145,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+ __ SmiUntag(r1);
// Copy the fixed array slots.
Label loop;
@@ -4211,7 +4214,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
@@ -4219,7 +4221,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the RegExp has been compiled (data contains a fixed array).
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- __ tst(regexp_data, Operand(kSmiTagMask));
+ __ SmiTst(regexp_data);
__ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
__ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
@@ -4324,7 +4326,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
__ cmp(r3, Operand(r1));
__ b(ls, &runtime);
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+ __ SmiUntag(r1);
STATIC_ASSERT(4 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
@@ -4399,7 +4401,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(r2, r9, Operand(r1, LSL, r3));
__ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
- __ mov(r8, Operand(r8, ASR, kSmiTagSize));
+ __ SmiUntag(r8);
__ add(r3, r9, Operand(r8, LSL, r3));
// Argument 2 (r1): Previous index.
@@ -4486,13 +4488,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
+ __ cmp(r2, Operand::SmiUntag(r0));
__ b(gt, &runtime);
// r1: number of capture registers
// r4: subject string
// Store the capture count.
- __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
+ __ SmiTag(r2, r1);
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
@@ -4536,7 +4538,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Read the value from the static offsets vector buffer.
__ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
// Store the smi value in the last match info.
- __ mov(r3, Operand(r3, LSL, kSmiTagSize));
+ __ SmiTag(r3);
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
__ jmp(&next_capture);
__ bind(&done);
@@ -4584,7 +4586,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (9) Sliced string. Replace subject with parent. Go to (4).
// Load offset into r9 and replace subject string with parent.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ mov(r9, Operand(r9, ASR, kSmiTagSize));
+ __ SmiUntag(r9);
__ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
@@ -4611,7 +4613,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// FixedArray.
int objects_size =
(JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
+ __ SmiUntag(r5, r1);
__ add(r2, r5, Operand(objects_size));
__ Allocate(
r2, // In: Size, in words.
@@ -4654,7 +4656,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ mov(r2, Operand(factory->fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
- __ mov(r6, Operand(r5, LSL, kSmiTagSize));
+ __ SmiTag(r6, r5);
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with undefined.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
@@ -4771,6 +4773,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ bind(&megamorphic);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
@@ -4970,7 +4973,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ cmp(ip, Operand(index_));
__ b(ls, index_out_of_range_);
- __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+ __ SmiUntag(index_);
StringCharLoadGenerator::Generate(masm,
object_,
@@ -4978,7 +4981,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
result_,
&call_runtime_);
- __ mov(result_, Operand(result_, LSL, kSmiTagSize));
+ __ SmiTag(result_);
__ bind(&exit_);
}
@@ -5024,7 +5027,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ mov(index_, Operand(index_, LSL, kSmiTagSize));
+ __ SmiTag(index_);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
__ Move(result_, r0);
@@ -5050,8 +5053,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ASCII char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case_);
@@ -5476,9 +5478,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
// Do a JumpIfSmi, but fold its jump into the subsequent string test.
- __ tst(r0, Operand(kSmiTagMask));
+ __ SmiTst(r0);
Condition is_string = masm->IsObjectStringType(r0, r1, ne);
ASSERT(is_string == eq);
__ b(NegateCondition(is_string), &runtime);
@@ -5822,7 +5823,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
+ if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
__ JumpIfEitherSmi(r0, r1, &call_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -5875,8 +5876,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&strings_not_empty);
}
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize));
+ __ SmiUntag(r2);
+ __ SmiUntag(r3);
// Both strings are non-empty.
// r0: first string
// r1: second string
@@ -6114,15 +6115,49 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ if ((flags_ & ERECT_FRAME) != 0) {
+ GenerateRegisterArgsPop(masm);
+ // Build a frame
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ __ CallRuntime(Runtime::kStringAdd, 2);
+ }
+ __ Ret();
+ } else {
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ }
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ if ((flags_ & ERECT_FRAME) != 0) {
+ GenerateRegisterArgsPop(masm);
+ // Build a frame
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
+ }
+ __ Ret();
+ } else {
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
}
}
+void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(r0);
+ __ push(r1);
+}
+
+
+void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
+ __ pop(r1);
+ __ pop(r0);
+}
+
+
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
@@ -6184,7 +6219,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
} else {
// Untag before subtracting to avoid handling overflow.
__ SmiUntag(r1);
- __ sub(r0, r1, SmiUntagOperand(r0));
+ __ sub(r0, r1, Operand::SmiUntag(r0));
}
__ Ret();
@@ -6218,10 +6253,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ vldr(d1, r2, HeapNumber::kValueOffset);
__ b(&left);
__ bind(&right_smi);
- __ SmiUntag(r2, r0); // Can't clobber r0 yet.
- SwVfpRegister single_scratch = d2.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d1, single_scratch);
+ __ SmiToDouble(d1, r0);
__ bind(&left);
__ JumpIfSmi(r1, &left_smi);
@@ -6231,10 +6263,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ vldr(d0, r2, HeapNumber::kValueOffset);
__ b(&done);
__ bind(&left_smi);
- __ SmiUntag(r2, r1); // Can't clobber r1 yet.
- single_scratch = d3.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d0, single_scratch);
+ __ SmiToDouble(d0, r1);
__ bind(&done);
// Compare operands.
@@ -6645,7 +6674,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// Compute the capacity mask.
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
+ __ SmiUntag(scratch1);
__ sub(scratch1, scratch1, Operand(1));
// Generate an unrolled loop that performs a few probes before
@@ -6726,7 +6755,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
__ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, kSmiTagSize));
+ __ SmiUntag(mask);
__ sub(mask, mask, Operand(1));
__ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
@@ -7124,7 +7153,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
__ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r0, MemOperand(r6, 0));
// Update the write barrier for the array store.
@@ -7136,7 +7165,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
__ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
__ Ret();
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 0b1a8b8472..863848cc37 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -211,11 +211,13 @@ class StringHelper : public AllStatic {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_ADD_FLAGS = 1 << 0,
// Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
// Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
+ // Stub needs a frame before calling the runtime
+ ERECT_FRAME = 1 << 3,
// Omit both string checks in stub.
NO_STRING_CHECK_IN_STUB =
NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
@@ -241,6 +243,9 @@ class StringAddStub: public PlatformCodeStub {
Register scratch4,
Label* slow);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateRegisterArgsPop(MacroAssembler* masm);
+
const StringAddFlags flags_;
};
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 9d773d4cc3..7bf253a333 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -440,7 +440,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Label indirect_string_loaded;
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
+ __ add(index, index, Operand::SmiUntag(result));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
@@ -510,9 +510,9 @@ void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
Register index,
Register value) {
if (FLAG_debug_code) {
- __ tst(index, Operand(kSmiTagMask));
+ __ SmiTst(index);
__ Check(eq, "Non-smi index");
- __ tst(value, Operand(kSmiTagMask));
+ __ SmiTst(value);
__ Check(eq, "Non-smi value");
__ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
@@ -540,10 +540,10 @@ void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
if (encoding == String::ONE_BYTE_ENCODING) {
// Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
- __ strb(value, MemOperand(ip, index, LSR, 1));
+ __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
} else {
// No need to untag a smi for two-byte addressing.
- __ strh(value, MemOperand(ip, index));
+ __ strh(value, MemOperand(ip, index)); // LSL(1 - kSmiTagSize).
}
}
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 6bfaf414c3..2f0a7c4e54 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -132,7 +132,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ tst(reg, Operand(0xc0000000));
__ Assert(eq, "Unable to encode value as smi");
}
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
+ __ SmiTag(reg);
}
}
__ stm(db_w, sp, object_regs | non_object_regs);
@@ -154,7 +154,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ __ SmiUntag(reg);
}
if (FLAG_debug_code &&
(((object_regs |non_object_regs) & (1 << r)) == 0)) {
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 001d3c830d..d973889bbe 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -53,14 +53,13 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
+ // The optimized code is going to be patched, so we cannot use it any more.
+ function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
+
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -277,7 +276,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
@@ -371,189 +370,12 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
-// This code is very similar to ia32 code, but relies on register names (fp, sp)
-// and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
- if (is_topmost) {
- output_frame->SetRegister(pc.code(), pc_value);
- }
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -597,6 +419,12 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on ARM in the input frame.
+ return false;
+}
+
+
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -640,7 +468,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the address of the location in the code object if possible (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
- if (type() == EAGER) {
+ if (type() == EAGER || type() == SOFT) {
__ mov(r3, Operand::Zero());
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
@@ -695,7 +523,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id, eventually return address, and the saved registers
// from the stack.
- if (type() == EAGER || type() == OSR) {
+ if (type() == EAGER || type() == SOFT || type() == OSR) {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
@@ -814,7 +642,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- if (type() == EAGER) {
+ if (type() == EAGER || type() == SOFT) {
__ nop();
} else {
// Emulate ia32 like call by pushing return address to stack.
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index b84d35535d..f55552df5b 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1102,6 +1102,7 @@ int Decoder::DecodeType7(Instruction* instr) {
// vmov: Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
@@ -1138,6 +1139,13 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+ (instr->Bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>
+ int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", fraction_bits);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 5cbe77afc2..f5a7dbd3ee 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -45,6 +45,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) {
}
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+
+
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 0bc1f48c87..33a499c275 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -175,6 +175,7 @@ void FullCodeGenerator::Generate() {
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -438,9 +439,11 @@ void FullCodeGenerator::EmitReturnSequence() {
PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
masm_->mov(sp, fp);
+ int no_frame_start = masm_->pc_offset();
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
#ifdef DEBUG
@@ -1195,7 +1198,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the current entry of the array into register r3.
__ ldr(r2, MemOperand(sp, 2 * kPointerSize));
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0));
// Get the expected map from the stack or a smi in the
// permanent slow case into register r2.
@@ -1961,8 +1964,102 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING:
- UNIMPLEMENTED();
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(&l_send);
+
+ // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ push(r3); // iter
+ __ push(r0); // exception
+ __ mov(r0, r3); // iter
+ __ push(r0); // push LoadIC state
+ __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
+ Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(throw_ic); // iter.throw in r0
+ __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
+ __ jmp(&l_call);
+
+ // try { received = yield result.value }
+ __ bind(&l_try);
+ __ pop(r0); // result.value
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(r0); // result.value
+ __ ldr(r3, MemOperand(sp, (0 + 1) * kPointerSize + handler_size)); // g
+ __ push(r3); // g
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &l_resume);
+ EmitReturnIteratorResult(false);
+ __ bind(&l_resume); // received in r0
+ __ PopTryHandler();
+
+ // receiver = iter; f = iter.send; arg = received;
+ __ bind(&l_send);
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ push(r3); // iter
+ __ push(r0); // received
+ __ mov(r0, r3); // iter
+ __ push(r0); // push LoadIC state
+ __ LoadRoot(r2, Heap::ksend_stringRootIndex); // "send"
+ Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(send_ic); // iter.send in r0
+ __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
+
+ // result = f.call(receiver, arg);
+ __ bind(&l_call);
+ Label l_call_runtime;
+ __ JumpIfSmi(r0, &l_call_runtime);
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ __ b(ne, &l_call_runtime);
+ __ mov(r1, r0);
+ ParameterCount count(1);
+ __ InvokeFunction(r1, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&l_loop);
+ __ bind(&l_call_runtime);
+ __ push(r0);
+ __ CallRuntime(Runtime::kCall, 3);
+
+ // val = result.value; if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ // result.value
+ __ push(r0); // save result
+ __ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in r0
+ __ pop(r1); // result
+ __ push(r0); // result.value
+ __ mov(r0, r1); // result
+ __ push(r0); // push LoadIC state
+ __ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done"
+ Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(done_ic); // result.done in r0
+ __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
+ ToBooleanStub stub(r0);
+ __ CallStub(&stub);
+ __ cmp(r0, Operand(0));
+ __ b(eq, &l_try);
+
+ // result.value
+ __ pop(r0); // result.value
+ context()->DropAndPlug(2, r0); // drop iter and g
+ break;
+ }
}
}
@@ -2166,23 +2263,18 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// BinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
- __ b(&stub_call);
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ mov(right, Operand(left, ASR, scratch1));
__ bic(right, right, Operand(kSmiTagMask));
break;
case Token::SHL: {
- __ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &stub_call);
- __ SmiTag(right, scratch1);
+ __ TrySmiTag(right, scratch1, &stub_call);
break;
}
case Token::SHR: {
- __ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSR, scratch2));
@@ -2761,7 +2853,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask));
+ __ SmiTst(r0);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2782,7 +2874,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+ __ NonNegativeSmiTst(r0);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2909,16 +3001,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ LoadInstanceDescriptors(r1, r4);
// r4: descriptor array.
// r3: valid entries in the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
__ mov(ip, Operand(DescriptorArray::kDescriptorSize));
__ mul(r3, r3, ip);
// Calculate location of the first key name.
__ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Calculate the end of the descriptor array.
__ mov(r2, r4);
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
// Loop through all the keys in the descriptor array. If one of these is the
// string "valueOf" the result is false.
@@ -3686,12 +3775,11 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Label done, not_found;
// tmp now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
// r2 now holds finger offset as a smi.
__ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+ __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex));
// Note side effect of PreIndex: r3 now points to the key of the pair.
__ cmp(key, r2);
__ b(ne, &not_found);
@@ -4654,9 +4742,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ push(result_register());
// Cook return address in link register to stack (smi encoded Code* delta)
__ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
+ __ SmiTag(r1);
// Store result register while executing finally block.
__ push(r1);
@@ -4710,8 +4796,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
// Uncook return address and return.
__ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
+ __ SmiUntag(r1);
__ add(pc, r1, Operand(masm_->CodeObject()));
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index c644be59d7..14c4794f4f 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -290,10 +290,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ b(hs, out_of_range);
// Fast case: Do the load.
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(scratch2,
- MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
@@ -567,7 +564,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow_load);
- __ mov(r0, Operand(r2, ASR, kSmiTagSize));
+ __ SmiUntag(r0, r2);
// r0: untagged index
__ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
@@ -960,7 +957,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize));
+ __ SmiUntag(r2, r0);
__ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
__ Ret();
@@ -1133,7 +1130,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ JumpIfSmi(r1, &slow);
// Check that the key is an array index, that is Uint32.
- __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
+ __ NonNegativeSmiTst(r0);
__ b(ne, &slow);
// Get the map of the receiver.
@@ -1194,7 +1191,7 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
@@ -1321,8 +1318,7 @@ static void KeyedStoreGenerateGenericHelper(
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
+ __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
__ Ret();
__ bind(&non_smi_value);
@@ -1338,7 +1334,7 @@ static void KeyedStoreGenerateGenericHelper(
__ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
__ str(value, MemOperand(address));
// Update write barrier for the elements array address.
__ mov(scratch_value, value); // Preserve the value which is returned.
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 3fe46ffd7b..e1bb69eacd 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -217,15 +217,6 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
value()->PrintTo(stream);
@@ -989,6 +980,10 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
}
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
@@ -1459,7 +1454,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
}
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
+ instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kCanOverflow)) {
return AssignEnvironment(DefineAsRegister(mod));
} else {
return DefineAsRegister(mod);
@@ -1718,12 +1714,6 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1836,7 +1826,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegister(instr->index());
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseTempRegister(instr->value());
LSeqStringSetChar* result =
new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
return DefineAsRegister(result);
@@ -2333,7 +2323,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject())) {
return AssignEnvironment(result);
}
return result;
@@ -2398,16 +2390,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 116d57621d..9bcd44ae05 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -56,7 +56,6 @@ class LCodeGen;
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLiteral) \
V(BitI) \
V(BitNotI) \
V(BoundsCheck) \
@@ -90,6 +89,7 @@ class LCodeGen;
V(ConstantI) \
V(ConstantT) \
V(Context) \
+ V(DebugBreak) \
V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
@@ -114,7 +114,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
@@ -152,7 +151,6 @@ class LCodeGen;
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -698,6 +696,12 @@ class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
};
+class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -887,24 +891,6 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsNilAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -2462,20 +2448,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 3a0f476b5a..09a0e9c066 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -95,6 +95,12 @@ void LCodeGen::FinishCode(Handle<Code> code) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
}
+ if (graph()->depends_on_empty_array_proto_elements()) {
+ isolate()->initial_object_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ isolate()->initial_array_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ }
}
@@ -354,9 +360,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
Address entry = deopt_jump_table_[i].address;
- bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
@@ -365,7 +369,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
if (deopt_jump_table_[i].needs_frame) {
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
- if (is_lazy_deopt) {
+ if (type == Deoptimizer::LAZY) {
if (needs_frame_is_call.is_bound()) {
__ b(&needs_frame_is_call);
} else {
@@ -398,7 +402,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
}
} else {
- if (is_lazy_deopt) {
+ if (type == Deoptimizer::LAZY) {
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
} else {
@@ -827,14 +831,13 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -867,9 +870,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+ (deopt_jump_table_.last().bailout_type != bailout_type) ||
(deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
deopt_jump_table_.Add(table_entry, zone());
}
__ b(cc, &deopt_jump_table_.last().label);
@@ -877,6 +882,21 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
}
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(cc, environment, bailout_type);
+}
+
+
+void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
+ ASSERT(!info()->IsStub());
+ DeoptimizeIf(al, environment, Deoptimizer::SOFT);
+}
+
+
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1428,7 +1448,6 @@ void LCodeGen::DoDivI(LDivI* instr) {
const Register left = ToRegister(instr->left());
const Register right = ToRegister(instr->right());
- const Register scratch = scratch0();
const Register result = ToRegister(instr->result());
// Check for x / 0.
@@ -1477,8 +1496,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
- __ TrySmiTag(left, &deoptimize, scratch);
- __ TrySmiTag(right, &deoptimize, scratch);
+ __ TrySmiTag(left, &deoptimize);
+ __ TrySmiTag(right, &deoptimize);
__ b(al, deferred->entry());
__ bind(deferred->exit());
@@ -1930,7 +1949,7 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Label done;
// If the object is a smi return the object.
- __ tst(input, Operand(kSmiTagMask));
+ __ SmiTst(input);
__ Move(result, input, eq);
__ b(eq, &done);
@@ -1955,7 +1974,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
- __ tst(object, Operand(kSmiTagMask));
+ __ SmiTst(object);
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
DeoptimizeIf(ne, instr->environment());
@@ -2178,6 +2197,11 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
}
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ stop("LBreak");
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2236,7 +2260,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(reg, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
- __ tst(reg, Operand(kSmiTagMask));
+ __ SmiTst(reg);
DeoptimizeIf(eq, instr->environment());
}
@@ -2399,48 +2423,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(ip, nil_value);
- __ cmp(reg, ip);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, eq);
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ b(eq, true_label);
- __ LoadRoot(ip, other_nil_value);
- __ cmp(reg, ip);
- __ b(eq, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
- }
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2514,7 +2496,7 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
+ __ SmiTst(input_reg);
EmitBranch(true_block, false_block, eq);
}
@@ -2920,9 +2902,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
count++;
}
}
+ int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
+ no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
@@ -2938,6 +2922,10 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ Jump(lr);
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
@@ -3379,8 +3367,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
@@ -3391,7 +3378,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ tst(result, Operand(kSmiTagMask));
+ __ SmiTst(result);
DeoptimizeIf(ne, instr->environment());
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
@@ -3534,7 +3521,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ b(eq, &global_object);
// Deoptimize if the receiver is not a JS object.
- __ tst(receiver, Operand(kSmiTagMask));
+ __ SmiTst(receiver);
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
DeoptimizeIf(lt, instr->environment());
@@ -4229,6 +4216,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
DeoptimizeIf(vs, instr->environment());
}
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ SmiTst(value);
+ DeoptimizeIf(eq, instr->environment());
+ }
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(instr->is_in_object());
@@ -4463,8 +4456,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
@@ -5149,14 +5141,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
- __ tst(ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input));
DeoptimizeIf(ne, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
LOperand* input = instr->value();
- __ tst(ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input));
DeoptimizeIf(eq, instr->environment());
}
@@ -5478,92 +5470,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals = instr->hydrogen()->literals();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r3, literals);
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- __ Push(r3, r2, r1);
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ Push(r3, r2, r1);
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals = instr->hydrogen()->literals();
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r3, literals);
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r1, Operand(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- __ mov(r0, Operand(Smi::FromInt(flags)));
-
- // Pick the right runtime function or stub to call.
- int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
- instr->hydrogen()->depth() > 1) {
- __ Push(r3, r2, r1, r0);
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ Push(r3, r2, r1, r0);
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(r0));
__ push(r0);
@@ -5796,7 +5702,11 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment());
+ if (instr->hydrogen_value()->IsSoftDeoptimize()) {
+ SoftDeoptimize(instr->environment());
+ } else {
+ DeoptimizeIf(al, instr->environment());
+ }
}
@@ -5917,7 +5827,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(r0, null_value);
DeoptimizeIf(eq, instr->environment());
- __ tst(r0, Operand(kSmiTagMask));
+ __ SmiTst(r0);
DeoptimizeIf(eq, instr->environment());
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -5985,8 +5895,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ cmp(index, Operand::Zero());
__ b(lt, &out_of_object);
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
__ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
__ b(&done);
@@ -5994,7 +5903,8 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&out_of_object);
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
// Index is equal to negated out of object property index plus 1.
- __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
__ ldr(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
__ bind(&done);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 294dcf2051..1a34169ebf 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -290,7 +290,11 @@ class LCodeGen BASE_EMBEDDED {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void SoftDeoptimize(LEnvironment* environment);
void AddToTranslation(Translation* translation,
LOperand* op,
@@ -387,18 +391,6 @@ class LCodeGen BASE_EMBEDDED {
Register scratch,
LEnvironment* environment);
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
void EnsureSpaceForLazyDeopt();
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
@@ -416,7 +408,7 @@ class LCodeGen BASE_EMBEDDED {
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 6e0b4a7040..a3b21a2bd5 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -495,9 +495,7 @@ void MacroAssembler::RecordWrite(Register object,
Label done;
if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- tst(value, Operand(kSmiTagMask));
- b(eq, &done);
+ JumpIfSmi(value, &done);
}
CheckPageFlag(value,
@@ -978,7 +976,7 @@ void MacroAssembler::InitializeNewString(Register string,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2) {
- mov(scratch1, Operand(length, LSL, kSmiTagSize));
+ SmiTag(scratch1, length);
LoadRoot(scratch2, map_index);
str(scratch1, FieldMemOperand(string, String::kLengthOffset));
mov(scratch1, Operand(String::kEmptyHashField));
@@ -1221,7 +1219,7 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
+ SmiUntag(expected_reg);
ldr(code_reg,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
@@ -1359,7 +1357,7 @@ void MacroAssembler::JumpToHandlerEntry() {
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
+ add(pc, r1, Operand::SmiUntag(r2)); // Jump
}
@@ -1575,7 +1573,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Compute the capacity mask.
ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
+ SmiUntag(t1);
sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
@@ -2095,14 +2093,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
b(&store);
bind(&smi_value);
- Register untagged_value = scratch1;
- SmiUntag(untagged_value, value_reg);
- vmov(s2, untagged_value);
- vcvt_f64_s32(d0, s2);
+ SmiToDouble(d0, value_reg);
bind(&store);
- add(scratch1, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
vstr(d0, FieldMemOperand(scratch1,
FixedDoubleArray::kHeaderSize - elements_offset));
}
@@ -2268,7 +2262,9 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
+ int stack_space,
+ bool returns_handle,
+ int return_value_offset) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@@ -2314,13 +2310,20 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
-
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- cmp(r0, Operand::Zero());
- LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- ldr(r0, MemOperand(r0), ne);
-
+ Label return_value_loaded;
+
+ if (returns_handle) {
+ Label load_return_value;
+ cmp(r0, Operand::Zero());
+ b(eq, &load_return_value);
+ // derefernce returned value
+ ldr(r0, MemOperand(r0));
+ b(&return_value_loaded);
+ bind(&load_return_value);
+ }
+ // load value from ReturnValue
+ ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
+ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
str(r4, MemOperand(r7, kNextOffset));
@@ -2390,70 +2393,21 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
(1 << String::kArrayIndexValueBits));
// We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- mov(index, Operand(hash, LSL, kSmiTagSize));
-}
-
-
-void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg) {
- // ARMv7 VFP3 instructions to implement integer to double conversion.
- mov(r7, Operand(inReg, ASR, kSmiTagSize));
- vmov(s15, r7);
- vcvt_f64_s32(d7, s15);
- vmov(outLowReg, outHighReg, d7);
-}
-
-
-void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
- DwVfpRegister result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags) {
- Label done;
- if ((flags & OBJECT_NOT_SMI) == 0) {
- Label not_smi;
- JumpIfNotSmi(object, &not_smi);
- // Remove smi tag and convert to double.
- mov(scratch1, Operand(object, ASR, kSmiTagSize));
- vmov(scratch3, scratch1);
- vcvt_f64_s32(result, scratch3);
- b(&done);
- bind(&not_smi);
- }
- // Check for heap number and load double value from it.
- ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- sub(scratch2, object, Operand(kHeapObjectTag));
- cmp(scratch1, heap_number_map);
- b(ne, not_number);
- if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
- // If exponent is all ones the number is either a NaN or +/-Infinity.
- ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- Sbfx(scratch1,
- scratch1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // All-one value sign extend to -1.
- cmp(scratch1, Operand(-1));
- b(eq, not_number);
- }
- vldr(result, scratch2, HeapNumber::kValueOffset);
- bind(&done);
+ SmiTag(index, hash);
}
-void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2) {
- mov(scratch1, Operand(smi, ASR, kSmiTagSize));
- vmov(scratch2, scratch1);
- vcvt_f64_s32(value, scratch2);
+void MacroAssembler::SmiToDouble(DwVfpRegister value, Register smi) {
+ ASSERT(value.code() < 16);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ vmov(value.low(), smi);
+ vcvt_f64_s32(value, 1);
+ } else {
+ SmiUntag(ip, smi);
+ vmov(value.low(), ip);
+ vcvt_f64_s32(value, value.low());
+ }
}
@@ -2610,7 +2564,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
- mov(dst, Operand(src, ASR, kSmiTagSize));
+ SmiUntag(dst, src);
and_(dst, dst, Operand((1 << num_least_bits) - 1));
}
}
@@ -3005,7 +2959,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
void MacroAssembler::UntagAndJumpIfSmi(
Register dst, Register src, Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ SmiUntag(dst, src, SetCC);
b(cc, smi_case); // Shifter carry is not set for a smi.
}
@@ -3013,7 +2967,7 @@ void MacroAssembler::UntagAndJumpIfSmi(
void MacroAssembler::UntagAndJumpIfNotSmi(
Register dst, Register src, Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ SmiUntag(dst, src, SetCC);
b(cs, non_smi_case); // Shifter carry is set for a non-smi.
}
@@ -3120,7 +3074,6 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
Register scratch2,
Label* failure) {
// Check that neither is a smi.
- STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure);
JumpIfNonSmisNotBothSequentialAsciiStrings(first,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 90272911cb..50f53b3168 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -44,12 +44,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
}
-inline Operand SmiUntagOperand(Register object) {
- return Operand(object, ASR, kSmiTagSize);
-}
-
-
-
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register kRootRegister = { 10 }; // Roots array pointer.
@@ -62,16 +56,6 @@ enum TaggingMode {
DONT_TAG_RESULT
};
-// Flags used for the ObjectToDoubleVFPRegister function.
-enum ObjectToDoubleFlags {
- // No special flags.
- NO_OBJECT_TO_DOUBLE_FLAGS = 0,
- // Object is known to be a non smi.
- OBJECT_NOT_SMI = 1 << 0,
- // Don't load NaNs or infinities, branch to the non number case instead.
- AVOID_NANS_AND_INFINITIES = 1 << 1
-};
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
@@ -974,31 +958,9 @@ class MacroAssembler: public Assembler {
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
- // Uses VFP instructions to Convert a Smi to a double.
- void IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg);
-
- // Load the value of a number object into a VFP double register. If the object
- // is not a number a jump to the label not_number is performed and the VFP
- // double register is unchanged.
- void ObjectToDoubleVFPRegister(
- Register object,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
-
- // Load the value of a smi object into a VFP double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2);
+ // Load the value of a smi object into a double register.
+ // The register value must be between d0 and d15.
+ void SmiToDouble(DwVfpRegister value, Register smi);
// Check if a double can be exactly represented as a signed 32-bit integer.
// Z flag set to one if true.
@@ -1125,7 +1087,10 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
+ void CallApiFunctionAndReturn(ExternalReference function,
+ int stack_space,
+ bool returns_handle,
+ int return_value_offset_from_fp);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1228,18 +1193,21 @@ class MacroAssembler: public Assembler {
// Try to convert int32 to smi. If the value is to large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
// sets flags.
- void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
- mov(scratch, reg);
- SmiTag(scratch, SetCC);
+ void TrySmiTag(Register reg, Label* not_a_smi) {
+ TrySmiTag(reg, reg, not_a_smi);
+ }
+ void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
+ SmiTag(ip, src, SetCC);
b(vs, not_a_smi);
- mov(reg, scratch);
+ mov(reg, ip);
}
+
void SmiUntag(Register reg, SBit s = LeaveCC) {
- mov(reg, Operand(reg, ASR, kSmiTagSize), s);
+ mov(reg, Operand::SmiUntag(reg), s);
}
void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
- mov(dst, Operand(src, ASR, kSmiTagSize), s);
+ mov(dst, Operand::SmiUntag(src), s);
}
// Untag the source value into destination and jump if source is a smi.
@@ -1250,6 +1218,13 @@ class MacroAssembler: public Assembler {
// Souce and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+ // Test if the register contains a smi (Z == 0 (eq) if true).
+ inline void SmiTst(Register value) {
+ tst(value, Operand(kSmiTagMask));
+ }
+ inline void NonNegativeSmiTst(Register value) {
+ tst(value, Operand(kSmiTagMask | kSmiSignMask));
+ }
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index af65bc70bd..c9db167b0e 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -412,7 +412,7 @@ void ArmDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
@@ -1628,10 +1628,13 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
// This signature supports direct call to accessor getter callback.
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
int32_t arg1);
+typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
+ int32_t arg1);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1770,40 +1773,56 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
}
- } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ } else if (
+ redirection->type() == ExternalReference::DIRECT_API_CALL ||
+ redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x",
- FUNCTION_ADDR(target), arg0);
+ reinterpret_cast<void*>(external), arg0);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ v8::Handle<v8::Value> result = target(arg0);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ }
+ set_register(r0, reinterpret_cast<int32_t>(*result));
+ } else {
+ SimulatorRuntimeDirectApiCallNew target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external);
+ target(arg0);
}
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ } else if (
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
- FUNCTION_ADDR(target), arg0, arg1);
+ reinterpret_cast<void*>(external), arg0, arg1);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ v8::Handle<v8::Value> result = target(arg0, arg1);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ }
+ set_register(r0, reinterpret_cast<int32_t>(*result));
+ } else {
+ SimulatorRuntimeDirectGetterCallNew target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
+ target(arg0, arg1);
}
- set_register(r0, reinterpret_cast<int32_t>(*result));
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -2698,6 +2717,7 @@ void Simulator::DecodeType7(Instruction* instr) {
// vmov :Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
@@ -2746,6 +2766,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
+ } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+ (instr->Bit(8) == 1)) {
+ // vcvt.f64.s32 Dd, Dd, #<fbits>
+ int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+ int fixed_value = get_sinteger_from_s_register(vd * 2);
+ double divide = 1 << fraction_bits;
+ set_d_register_from_double(vd, fixed_value / divide);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 127bf3fdd9..b0de014511 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -516,6 +516,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
@@ -685,6 +687,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
@@ -848,7 +852,7 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-static const int kFastApiCallArguments = 4;
+static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to API function in the
// caller's frame.
@@ -877,10 +881,11 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[4] : callee JS function
// -- sp[8] : call data
// -- sp[12] : isolate
- // -- sp[16] : last JS argument
+ // -- sp[16] : ReturnValue
+ // -- sp[20] : last JS argument
// -- ...
- // -- sp[(argc + 3) * 4] : first JS argument
- // -- sp[(argc + 4) * 4] : receiver
+ // -- sp[(argc + 4) * 4] : first JS argument
+ // -- sp[(argc + 5) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -897,11 +902,13 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ Move(r6, call_data);
}
__ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data and isolate.
+ // Store JS function, call data, isolate and ReturnValue.
__ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ str(r5, MemOperand(sp, 4 * kPointerSize));
// Prepare arguments.
- __ add(r2, sp, Operand(3 * kPointerSize));
+ __ add(r2, sp, Operand(4 * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -927,13 +934,21 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
ApiFunction fun(function_address);
+ ExternalReference::Type type =
+ returns_handle ?
+ ExternalReference::DIRECT_API_CALL :
+ ExternalReference::DIRECT_API_CALL_NEW;
ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
+ type,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
-
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ __ CallApiFunctionAndReturn(ref,
+ kStackUnwindSpace,
+ returns_handle,
+ kFastApiCallArguments + 1);
}
@@ -1409,7 +1424,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ Push(reg, scratch3());
__ mov(scratch3(),
Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch3(), name());
+ __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+ __ Push(scratch3(), scratch4(), name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
@@ -1421,12 +1437,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
- const int kStackUnwindSpace = 5;
+ const int kStackUnwindSpace = kFastApiCallArguments + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(isolate(), getter_address);
ApiFunction fun(getter_address);
- ExternalReference ref = ExternalReference(
- &fun, ExternalReference::DIRECT_GETTER_CALL, isolate());
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ ExternalReference::Type type =
+ returns_handle ?
+ ExternalReference::DIRECT_GETTER_CALL :
+ ExternalReference::DIRECT_GETTER_CALL_NEW;
+
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ CallApiFunctionAndReturn(ref,
+ kStackUnwindSpace,
+ returns_handle,
+ 3);
}
@@ -1676,8 +1701,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));
// Get the elements' length.
@@ -1697,8 +1720,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
@@ -1718,8 +1740,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));
// Get the elements' length.
@@ -1793,8 +1813,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
@@ -1831,8 +1850,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
const int kAllocationDelta = 4;
// Load top and check if it is the end of elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
__ mov(r7, Operand(new_space_allocation_top));
__ ldr(r3, MemOperand(r7));
@@ -1928,11 +1946,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// Get the last element.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
// We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on.
- __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
__ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ cmp(r0, r6);
__ b(eq, &call_builtin);
@@ -2154,7 +2170,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
@@ -2172,7 +2187,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Check the code is a smi.
Label slow;
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
@@ -2226,7 +2240,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
name, &miss);
@@ -2241,8 +2254,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ ldr(r0, MemOperand(sp, 0 * kPointerSize));
// If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
+ __ SmiTst(r0);
__ Drop(argc + 1, eq);
__ Ret(eq);
@@ -2288,11 +2300,9 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ bind(&smi_check);
// Check if the result can fit into an smi. If we had an overflow,
// the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi.
- __ add(r1, r0, Operand(0x40000000), SetCC);
// If result doesn't fit into an smi, branch to slow.
- __ b(&slow, mi);
- // Tag the result.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ SmiTag(r0, SetCC);
+ __ b(vs, &slow);
__ bind(&just_return);
__ Drop(argc + 1);
@@ -2337,7 +2347,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
name, &miss);
@@ -2353,7 +2362,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Check if the argument is a smi.
Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(r0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the
@@ -3233,8 +3241,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
Register key = r0;
Register receiver = r1;
- __ JumpIfNotSmi(key, &miss_force_generic);
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
+ __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
__ Ret();
@@ -3266,7 +3273,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
- Register scratch1,
DwVfpRegister double_scratch0,
DwVfpRegister double_scratch1,
Label* fail) {
@@ -3284,8 +3290,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
__ b(ne, fail);
- __ TrySmiTag(scratch0, fail, scratch1);
- __ mov(key, scratch0);
+ __ TrySmiTag(key, scratch0, fail);
__ bind(&key_ok);
}
@@ -3311,7 +3316,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -3326,11 +3331,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// r3: external array.
if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
+ __ UntagAndJumpIfNotSmi(r5, value, &slow);
} else {
- __ JumpIfNotSmi(value, &check_heap_number);
+ __ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
}
- __ SmiUntag(r5, value);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
@@ -3501,7 +3505,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -3535,20 +3539,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(IsFastObjectElementsKind(elements_kind));
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
__ str(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
__ RecordWrite(elements_reg, // Object.
@@ -3662,7 +3660,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 54f0b486e2..599fd5cfe9 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -416,6 +416,26 @@ function ArrayPop() {
}
+function ObservedArrayPush() {
+ var n = TO_UINT32(this.length);
+ var m = %_ArgumentsLength();
+
+ EnqueueSpliceRecord(this, n, [], 0, m);
+
+ try {
+ BeginPerformSplice(this);
+
+ for (var i = 0; i < m; i++) {
+ this[i+n] = %_Arguments(i);
+ }
+ this.length = n + m;
+ } finally {
+ EndPerformSplice(this);
+ }
+
+ return this.length;
+}
+
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
@@ -424,6 +444,9 @@ function ArrayPush() {
["Array.prototype.push"]);
}
+ if (%IsObserved(this))
+ return ObservedArrayPush.apply(this, arguments);
+
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 6abd5c55da..2d9e727e57 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -647,9 +647,17 @@ class ExternalReference BASE_EMBEDDED {
// Handle<Value> f(v8::Arguments&)
DIRECT_API_CALL,
+ // Direct call to API function callback.
+ // void f(v8::Arguments&)
+ DIRECT_API_CALL_NEW,
+
// Direct call to accessor getter callback.
// Handle<value> f(Local<String> property, AccessorInfo& info)
- DIRECT_GETTER_CALL
+ DIRECT_GETTER_CALL,
+
+ // Direct call to accessor getter callback.
+ // void f(Local<String> property, AccessorInfo& info)
+ DIRECT_GETTER_CALL_NEW
};
static void SetUp();
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 9ffb00db0d..ad7b119854 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -278,7 +278,9 @@ class SmallMapList {
int length() const { return list_.length(); }
void AddMapIfMissing(Handle<Map> map, Zone* zone) {
- map = Map::CurrentMapForDeprecated(map);
+ Map* updated = map->CurrentMapForDeprecated();
+ if (updated == NULL) return;
+ map = Handle<Map>(updated);
for (int i = 0; i < length(); ++i) {
if (at(i).is_identical_to(map)) return;
}
@@ -286,6 +288,7 @@ class SmallMapList {
}
void Add(Handle<Map> handle, Zone* zone) {
+ ASSERT(!handle->is_deprecated());
list_.Add(handle.location(), zone);
}
@@ -1992,6 +1995,18 @@ class Yield: public Expression {
Kind yield_kind() const { return yield_kind_; }
virtual int position() const { return pos_; }
+ // Delegating yield surrounds the "yield" in a "try/catch". This index
+ // locates the catch handler in the handler table, and is equivalent to
+ // TryCatchStatement::index().
+ int index() const {
+ ASSERT(yield_kind() == DELEGATING);
+ return index_;
+ }
+ void set_index(int index) {
+ ASSERT(yield_kind() == DELEGATING);
+ index_ = index;
+ }
+
protected:
Yield(Isolate* isolate,
Expression* generator_object,
@@ -2002,12 +2017,14 @@ class Yield: public Expression {
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind),
+ index_(-1),
pos_(pos) { }
private:
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
+ int index_;
int pos_;
};
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index b0d3a5e50e..7c9e4366ed 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -95,6 +95,10 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
void Bootstrapper::Initialize(bool create_heap_objects) {
extensions_cache_.Initialize(create_heap_objects);
+}
+
+
+void Bootstrapper::InitializeOncePerProcess() {
GCExtension::Register();
ExternalizeStringExtension::Register();
StatisticsExtension::Register();
@@ -201,7 +205,8 @@ class Genesis BASE_EMBEDDED {
ElementsKind elements_kind);
bool InstallNatives();
- Handle<JSFunction> InstallTypedArray(const char* name);
+ Handle<JSFunction> InstallTypedArray(const char* name,
+ ElementsKind elementsKind);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
@@ -281,12 +286,12 @@ class Genesis BASE_EMBEDDED {
Handle<Context> result_;
Handle<Context> native_context_;
- // Function instance maps. Function literal maps are created initially with
- // a read only prototype for the processing of JS builtins. Later the function
- // instance maps are replaced in order to make prototype writable.
- // These are the final, writable prototype, maps.
- Handle<Map> function_instance_map_writable_prototype_;
- Handle<Map> strict_mode_function_instance_map_writable_prototype_;
+ // Function maps. Function maps are created initially with a read only
+ // prototype for the processing of JS builtins. Later the function maps are
+ // replaced in order to make prototype writable. These are the final, writable
+ // prototype, maps.
+ Handle<Map> function_map_writable_prototype_;
+ Handle<Map> strict_mode_function_map_writable_prototype_;
Handle<JSFunction> throw_type_error_function;
BootstrapperActive active_;
@@ -349,7 +354,8 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
int instance_size,
Handle<JSObject> prototype,
Builtins::Name call,
- bool is_ecma_native) {
+ bool install_initial_map,
+ bool set_instance_class_name) {
Isolate* isolate = target->GetIsolate();
Factory* factory = isolate->factory();
Handle<String> internalized_name = factory->InternalizeUtf8String(name);
@@ -361,7 +367,7 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
instance_size,
prototype,
call_code,
- is_ecma_native);
+ install_initial_map);
PropertyAttributes attributes;
if (target->IsJSBuiltinsObject()) {
attributes =
@@ -372,7 +378,7 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
target, internalized_name, function, attributes));
- if (is_ecma_native) {
+ if (set_instance_class_name) {
function->shared()->set_instance_class_name(*internalized_name);
}
function->shared()->set_native(true);
@@ -437,12 +443,6 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the map for function instances. Maps are allocated first and their
// prototypes patched later, once empty function is created.
- // Please note that the prototype property for function instances must be
- // writable.
- Handle<Map> function_instance_map =
- CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
- native_context()->set_function_instance_map(*function_instance_map);
-
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
@@ -458,13 +458,11 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- function_instance_map_writable_prototype_ =
- CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+ function_map_writable_prototype_ = CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
- Handle<String> object_name = Handle<String>(heap->Object_string());
+ Handle<String> object_name = factory->Object_string();
{ // --- O b j e c t ---
Handle<JSFunction> object_fun =
@@ -482,6 +480,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
TENURED);
native_context()->set_initial_object_prototype(*prototype);
+ // For bootstrapping set the array prototype to be the same as the object
+ // prototype, otherwise the missing initial_array_prototype will cause
+ // assertions during startup.
+ native_context()->set_initial_array_prototype(*prototype);
SetPrototype(object_fun, prototype);
}
@@ -509,10 +511,9 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Set prototypes for the function maps.
native_context()->function_map()->set_prototype(*empty_function);
- native_context()->function_instance_map()->set_prototype(*empty_function);
native_context()->function_without_prototype_map()->
set_prototype(*empty_function);
- function_instance_map_writable_prototype_->set_prototype(*empty_function);
+ function_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
@@ -601,12 +602,6 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap(
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
- // Allocate map for the strict mode function instances.
- Handle<Map> strict_mode_function_instance_map =
- CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_instance_map(
- *strict_mode_function_instance_map);
-
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_mode_function_without_prototype_map =
CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
@@ -623,15 +618,13 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// The final map for the strict mode functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_mode_function_instance_map_writable_prototype_ =
+ strict_mode_function_map_writable_prototype_ =
CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
// Complete the callbacks.
- PoisonArgumentsAndCaller(strict_mode_function_instance_map);
PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
PoisonArgumentsAndCaller(strict_mode_function_map);
- PoisonArgumentsAndCaller(
- strict_mode_function_instance_map_writable_prototype_);
+ PoisonArgumentsAndCaller(strict_mode_function_map_writable_prototype_);
}
@@ -846,7 +839,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
- Handle<String> object_name = Handle<String>(heap->Object_string());
+ Handle<String> object_name = factory->Object_string();
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
inner_global, object_name,
@@ -856,13 +849,13 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Install global Function object
InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::kIllegal, true); // ECMA native.
+ empty_function, Builtins::kIllegal, true, true);
{ // --- A r r a y ---
Handle<JSFunction> array_function =
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
isolate->initial_object_prototype(),
- Builtins::kArrayCode, true);
+ Builtins::kArrayCode, true, true);
array_function->shared()->DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
@@ -906,7 +899,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> number_fun =
InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_number_function(*number_fun);
}
@@ -914,7 +907,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> boolean_fun =
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_boolean_function(*boolean_fun);
}
@@ -922,7 +915,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> string_fun =
InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
string_fun->shared()->set_construct_stub(
isolate->builtins()->builtin(Builtins::kStringConstructCode));
native_context()->set_string_function(*string_fun);
@@ -950,7 +943,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> date_fun =
InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_date_function(*date_fun);
}
@@ -961,7 +954,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> regexp_fun =
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_regexp_function(*regexp_fun);
ASSERT(regexp_fun->has_initial_map());
@@ -1276,11 +1269,18 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
-Handle<JSFunction> Genesis::InstallTypedArray(const char* name) {
+Handle<JSFunction> Genesis::InstallTypedArray(
+ const char* name, ElementsKind elementsKind) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
- return InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
+ Handle<JSFunction> result = InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
JSTypedArray::kSize, isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, false, true);
+
+ Handle<Map> initial_map = isolate()->factory()->NewMap(
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, elementsKind);
+ result->set_initial_map(*initial_map);
+ initial_map->set_constructor(*result);
+ return result;
}
@@ -1295,7 +1295,7 @@ void Genesis::InitializeExperimentalGlobal() {
Handle<JSFunction> symbol_fun =
InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_symbol_function(*symbol_fun);
}
@@ -1303,17 +1303,17 @@ void Genesis::InitializeExperimentalGlobal() {
{ // -- S e t
InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
}
{ // -- M a p
InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
}
{ // -- W e a k M a p
InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
}
}
@@ -1323,29 +1323,38 @@ void Genesis::InitializeExperimentalGlobal() {
InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
+ Builtins::kIllegal, true, true);
native_context()->set_array_buffer_fun(*array_buffer_fun);
}
if (FLAG_harmony_typed_arrays) {
// -- T y p e d A r r a y s
- Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array");
+ Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
+ EXTERNAL_BYTE_ELEMENTS);
native_context()->set_int8_array_fun(*int8_fun);
- Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array");
+ Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
+ EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
native_context()->set_uint8_array_fun(*uint8_fun);
- Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array");
+ Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
+ EXTERNAL_SHORT_ELEMENTS);
native_context()->set_int16_array_fun(*int16_fun);
- Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array");
+ Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
+ EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
native_context()->set_uint16_array_fun(*uint16_fun);
- Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array");
+ Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
+ EXTERNAL_INT_ELEMENTS);
native_context()->set_int32_array_fun(*int32_fun);
- Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array");
+ Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
+ EXTERNAL_UNSIGNED_INT_ELEMENTS);
native_context()->set_uint32_array_fun(*uint32_fun);
- Handle<JSFunction> float_fun = InstallTypedArray("Float32Array");
+ Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
+ EXTERNAL_FLOAT_ELEMENTS);
native_context()->set_float_array_fun(*float_fun);
- Handle<JSFunction> double_fun = InstallTypedArray("Float64Array");
+ Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
+ EXTERNAL_DOUBLE_ELEMENTS);
native_context()->set_double_array_fun(*double_fun);
- Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray");
+ Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
+ EXTERNAL_PIXEL_ELEMENTS);
native_context()->set_uint8c_array_fun(*uint8c_fun);
}
@@ -1358,11 +1367,11 @@ void Genesis::InitializeExperimentalGlobal() {
InstallFunction(builtins, "GeneratorFunctionPrototype",
JS_FUNCTION_TYPE, JSFunction::kHeaderSize,
generator_object_prototype, Builtins::kIllegal,
- false);
+ false, false);
InstallFunction(builtins, "GeneratorFunction",
JS_FUNCTION_TYPE, JSFunction::kSize,
generator_function_prototype, Builtins::kIllegal,
- false);
+ false, false);
// Create maps for generator functions and their prototypes. Store those
// maps in the native context.
@@ -1590,7 +1599,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(
JSArray::kSize,
isolate()->initial_object_prototype(),
Builtins::kInternalArrayCode,
- true);
+ true, true);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(array_function, prototype);
@@ -1690,7 +1699,7 @@ bool Genesis::InstallNatives() {
Handle<JSFunction> script_fun =
InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, false);
+ Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(script_fun, prototype);
@@ -1846,7 +1855,7 @@ bool Genesis::InstallNatives() {
InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
JSValue::kSize,
isolate()->initial_object_prototype(),
- Builtins::kIllegal, false);
+ Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(opaque_reference_fun, prototype);
@@ -1910,12 +1919,12 @@ bool Genesis::InstallNatives() {
InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
Handle<JSObject>::null(),
Builtins::kFunctionCall,
- false);
+ false, false);
Handle<JSFunction> apply =
InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
Handle<JSObject>::null(),
Builtins::kFunctionApply,
- false);
+ false, false);
// Make sure that Function.prototype.call appears to be compiled.
// The code will never be called, but inline caching for call will
@@ -2381,6 +2390,10 @@ bool Genesis::ConfigureGlobalObjects(
}
SetObjectPrototype(global_proxy, inner_global);
+
+ native_context()->set_initial_array_prototype(
+ JSArray::cast(native_context()->array_function()->prototype()));
+
return true;
}
@@ -2522,14 +2535,13 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
// The maps with writable prototype are created in CreateEmptyFunction
// and CreateStrictModeFunctionMaps respectively. Initially the maps are
// created with read-only prototype for JS builtins processing.
- ASSERT(!function_instance_map_writable_prototype_.is_null());
- ASSERT(!strict_mode_function_instance_map_writable_prototype_.is_null());
+ ASSERT(!function_map_writable_prototype_.is_null());
+ ASSERT(!strict_mode_function_map_writable_prototype_.is_null());
// Replace function instance maps to make prototype writable.
- native_context()->set_function_map(
- *function_instance_map_writable_prototype_);
+ native_context()->set_function_map(*function_map_writable_prototype_);
native_context()->set_strict_mode_function_map(
- *strict_mode_function_instance_map_writable_prototype_);
+ *strict_mode_function_map_writable_prototype_);
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index e33415eeb9..476ac12e14 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -88,6 +88,8 @@ class SourceCodeCache BASE_EMBEDDED {
// context.
class Bootstrapper {
public:
+ static void InitializeOncePerProcess();
+
// Requires: Heap::SetUp has been called.
void Initialize(bool create_heap_objects);
void TearDown();
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 661ee94371..81b600574c 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -845,7 +845,7 @@ BUILTIN(ArraySlice) {
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- relative_start = static_cast<int>(start);
+ relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -858,7 +858,7 @@ BUILTIN(ArraySlice) {
if (end < kMinInt || end > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- relative_end = static_cast<int>(end);
+ relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -1317,15 +1317,13 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
ASSERT(raw_holder->IsJSObject());
- CustomArguments custom(isolate);
- v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
- isolate, data_obj, *function, raw_holder);
-
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- custom.end(),
- &args[0] - 1,
- args.length() - 1,
- is_construct);
+ FunctionCallbackArguments custom(isolate,
+ data_obj,
+ *function,
+ raw_holder,
+ &args[0] - 1,
+ args.length() - 1,
+ is_construct);
v8::Handle<v8::Value> value;
{
@@ -1333,7 +1331,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate,
v8::ToCData<Address>(callback_obj));
- value = callback(new_args);
+ value = custom.Call(callback);
}
if (value.IsEmpty()) {
result = heap->undefined_value();
@@ -1396,21 +1394,20 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));
- CustomArguments custom(isolate);
- v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
- isolate, call_data->data(), constructor, obj);
- v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
- custom.end(),
- &args[0] - 1,
- args.length() - 1,
- is_construct_call);
+ FunctionCallbackArguments custom(isolate,
+ call_data->data(),
+ constructor,
+ obj,
+ &args[0] - 1,
+ args.length() - 1,
+ is_construct_call);
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate,
v8::ToCData<Address>(callback_obj));
- value = callback(new_args);
+ value = custom.Call(callback);
}
if (value.IsEmpty()) {
result = heap->undefined_value();
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 6fc17c45c0..58d1a8b147 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -107,6 +107,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
@@ -380,6 +382,7 @@ class Builtins {
static void Generate_LazyCompile(MacroAssembler* masm);
static void Generate_LazyRecompile(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
+ static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index a6405ecdec..8bcde1c61c 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -53,7 +53,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
if (fatal_error_handler_nesting_depth < 3) {
if (i::FLAG_stack_trace_on_abort) {
// Call this one twice on double fault
- i::Isolate::Current()->PrintStack();
+ i::Isolate::Current()->PrintStack(stderr);
}
}
i::OS::Abort();
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 31431b71ca..6e837ddb95 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -418,7 +418,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- false, STANDARD_STORE, Representation::Tagged());
+ false, NEVER_RETURN_HOLE, STANDARD_STORE, Representation::Tagged());
return load;
}
@@ -463,7 +463,8 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- true, casted_stub()->store_mode(), Representation::Tagged());
+ true, NEVER_RETURN_HOLE, casted_stub()->store_mode(),
+ Representation::Tagged());
return GetParameter(2);
}
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index aa2c82172c..312febc1aa 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -232,37 +232,37 @@ void BinaryOpStub::Generate(MacroAssembler* masm) {
void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION);
break;
case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION);
break;
case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION);
break;
case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION);
break;
case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION);
break;
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION);
break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION);
break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION);
break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION);
break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION);
break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ __ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION);
break;
default:
UNREACHABLE();
@@ -408,41 +408,50 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
}
-CompareNilICStub::Types CompareNilICStub::GetPatchedICFlags(
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- bool* already_monomorphic) {
- Types types = TypesField::decode(extra_ic_state);
- NilValue nil = NilValueField::decode(extra_ic_state);
- EqualityKind kind = EqualityKindField::decode(extra_ic_state);
- ASSERT(types != CompareNilICStub::kFullCompare);
- *already_monomorphic =
- (types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0;
- if (kind == kStrictEquality) {
- if (nil == kNullValue) {
- return CompareNilICStub::kCompareAgainstNull;
- } else {
- return CompareNilICStub::kCompareAgainstUndefined;
- }
+void CompareNilICStub::Record(Handle<Object> object) {
+ ASSERT(types_ != Types::FullCompare());
+ if (equality_kind_ == kStrictEquality) {
+ // When testing for strict equality only one value will evaluate to true
+ types_.RemoveAll();
+ types_.Add((nil_value_ == kNullValue) ? NULL_TYPE:
+ UNDEFINED);
} else {
if (object->IsNull()) {
- types = static_cast<CompareNilICStub::Types>(
- types | CompareNilICStub::kCompareAgainstNull);
+ types_.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
- types = static_cast<CompareNilICStub::Types>(
- types | CompareNilICStub::kCompareAgainstUndefined);
+ types_.Add(UNDEFINED);
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
- types = CompareNilICStub::kFullCompare;
- } else if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) {
- types = CompareNilICStub::kFullCompare;
+ types_ = Types::FullCompare();
+ } else if (IsMonomorphic()) {
+ types_ = Types::FullCompare();
} else {
- types = static_cast<CompareNilICStub::Types>(
- types | CompareNilICStub::kCompareAgainstMonomorphicMap);
+ types_.Add(MONOMORPHIC_MAP);
}
}
- return types;
+}
+
+
+void CompareNilICStub::PrintName(StringStream* stream) {
+ stream->Add("CompareNilICStub_");
+ types_.Print(stream);
+ stream->Add((nil_value_ == kNullValue) ? "(NullValue|":
+ "(UndefinedValue|");
+ stream->Add((equality_kind_ == kStrictEquality) ? "StrictEquality)":
+ "NonStrictEquality)");
+}
+
+
+void CompareNilICStub::Types::Print(StringStream* stream) const {
+ stream->Add("(");
+ SimpleListPrinter printer(stream);
+ if (IsEmpty()) printer.Add("None");
+ if (Contains(UNDEFINED)) printer.Add("Undefined");
+ if (Contains(NULL_TYPE)) printer.Add("Null");
+ if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap");
+ if (Contains(UNDETECTABLE)) printer.Add("Undetectable");
+ stream->Add(")");
}
@@ -552,15 +561,18 @@ void ToBooleanStub::PrintName(StringStream* stream) {
void ToBooleanStub::Types::Print(StringStream* stream) const {
- if (IsEmpty()) stream->Add("None");
- if (Contains(UNDEFINED)) stream->Add("Undefined");
- if (Contains(BOOLEAN)) stream->Add("Bool");
- if (Contains(NULL_TYPE)) stream->Add("Null");
- if (Contains(SMI)) stream->Add("Smi");
- if (Contains(SPEC_OBJECT)) stream->Add("SpecObject");
- if (Contains(STRING)) stream->Add("String");
- if (Contains(SYMBOL)) stream->Add("Symbol");
- if (Contains(HEAP_NUMBER)) stream->Add("HeapNumber");
+ stream->Add("(");
+ SimpleListPrinter printer(stream);
+ if (IsEmpty()) printer.Add("None");
+ if (Contains(UNDEFINED)) printer.Add("Undefined");
+ if (Contains(BOOLEAN)) printer.Add("Bool");
+ if (Contains(NULL_TYPE)) printer.Add("Null");
+ if (Contains(SMI)) printer.Add("Smi");
+ if (Contains(SPEC_OBJECT)) printer.Add("SpecObject");
+ if (Contains(STRING)) printer.Add("String");
+ if (Contains(SYMBOL)) printer.Add("Symbol");
+ if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber");
+ stream->Add(")");
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 646aee23eb..aa6a410195 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -1047,41 +1047,52 @@ class ICCompareStub: public PlatformCodeStub {
class CompareNilICStub : public HydrogenCodeStub {
public:
- enum Types {
- kCompareAgainstNull = 1 << 0,
- kCompareAgainstUndefined = 1 << 1,
- kCompareAgainstMonomorphicMap = 1 << 2,
- kCompareAgainstUndetectable = 1 << 3,
- kFullCompare = kCompareAgainstNull | kCompareAgainstUndefined |
- kCompareAgainstUndetectable
+ enum Type {
+ UNDEFINED,
+ NULL_TYPE,
+ MONOMORPHIC_MAP,
+ UNDETECTABLE,
+ NUMBER_OF_TYPES
+ };
+
+ class Types : public EnumSet<Type, byte> {
+ public:
+ Types() : EnumSet<Type, byte>(0) { }
+ explicit Types(byte bits) : EnumSet<Type, byte>(bits) { }
+
+ static Types FullCompare() {
+ Types set;
+ set.Add(UNDEFINED);
+ set.Add(NULL_TYPE);
+ set.Add(UNDETECTABLE);
+ return set;
+ }
+
+ void Print(StringStream* stream) const;
};
+ // At most 6 different types can be distinguished, because the Code object
+ // only has room for a single byte to hold a set and there are two more
+ // boolean flags we need to store. :-P
+ STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
+
CompareNilICStub(EqualityKind kind, NilValue nil, Types types)
- : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), bit_field_(0) {
- bit_field_ = EqualityKindField::encode(kind) |
- NilValueField::encode(nil) |
- TypesField::encode(types);
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), types_(types) {
+ equality_kind_ = kind;
+ nil_value_ = nil;
}
- virtual InlineCacheState GetICState() {
- Types types = GetTypes();
- if (types == kFullCompare) {
- return MEGAMORPHIC;
- } else if ((types & kCompareAgainstMonomorphicMap) != 0) {
- return MONOMORPHIC;
- } else {
- return PREMONOMORPHIC;
- }
+ explicit CompareNilICStub(Code::ExtraICState ic_state)
+ : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+ equality_kind_ = EqualityKindField::decode(ic_state);
+ nil_value_ = NilValueField::decode(ic_state);
+ types_ = Types(ExtractTypesFromExtraICState(ic_state));
}
- virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
-
- Handle<Code> GenerateCode();
-
static Handle<Code> GetUninitialized(Isolate* isolate,
EqualityKind kind,
NilValue nil) {
- return CompareNilICStub(kind, nil).GetCode(isolate);
+ return CompareNilICStub(kind, nil, CODE_STUB_IS_MISS).GetCode(isolate);
}
virtual void InitializeInterfaceDescriptor(
@@ -1089,53 +1100,76 @@ class CompareNilICStub : public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
static void InitializeForIsolate(Isolate* isolate) {
- CompareNilICStub compare_stub(kStrictEquality, kNullValue);
+ CompareNilICStub compare_stub(kStrictEquality, kNullValue,
+ CODE_STUB_IS_MISS);
compare_stub.InitializeInterfaceDescriptor(
isolate,
isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
}
- virtual Code::ExtraICState GetExtraICState() {
- return bit_field_;
+ virtual InlineCacheState GetICState() {
+ if (types_ == Types::FullCompare()) {
+ return MEGAMORPHIC;
+ } else if (types_.Contains(MONOMORPHIC_MAP)) {
+ return MONOMORPHIC;
+ } else {
+ return PREMONOMORPHIC;
+ }
}
- EqualityKind GetKind() { return EqualityKindField::decode(bit_field_); }
- NilValue GetNilValue() { return NilValueField::decode(bit_field_); }
- Types GetTypes() { return TypesField::decode(bit_field_); }
+ virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
- static Types TypesFromExtraICState(
- Code::ExtraICState state) {
- return TypesField::decode(state);
+ Handle<Code> GenerateCode();
+
+ // extra ic state = nil_value | equality_kind | type_n-1 | ... | type_0
+ virtual Code::ExtraICState GetExtraICState() {
+ return NilValueField::encode(nil_value_) |
+ EqualityKindField::encode(equality_kind_) |
+ types_.ToIntegral();
}
- static EqualityKind EqualityKindFromExtraICState(
+ static byte ExtractTypesFromExtraICState(
Code::ExtraICState state) {
- return EqualityKindField::decode(state);
- }
- static NilValue NilValueFromExtraICState(Code::ExtraICState state) {
- return NilValueField::decode(state);
+ return state & ((1<<NUMBER_OF_TYPES)-1);
}
- static Types GetPatchedICFlags(Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- bool* already_monomorphic);
+ void Record(Handle<Object> object);
+
+ bool IsMonomorphic() const { return types_.Contains(MONOMORPHIC_MAP); }
+ EqualityKind GetKind() const { return equality_kind_; }
+ NilValue GetNilValue() const { return nil_value_; }
+ Types GetTypes() const { return types_; }
+ void ClearTypes() { types_.RemoveAll(); }
+ void SetKind(EqualityKind kind) { equality_kind_ = kind; }
+
+ virtual void PrintName(StringStream* stream);
private:
friend class CompareNilIC;
- class EqualityKindField : public BitField<EqualityKind, 0, 1> {};
- class NilValueField : public BitField<NilValue, 1, 1> {};
- class TypesField : public BitField<Types, 3, 4> {};
+ CompareNilICStub(EqualityKind kind, NilValue nil,
+ InitializationState init_state)
+ : HydrogenCodeStub(init_state), types_(0) {
+ equality_kind_ = kind;
+ nil_value_ = nil;
+ }
- CompareNilICStub(EqualityKind kind, NilValue nil)
- : HydrogenCodeStub(CODE_STUB_IS_MISS), bit_field_(0) {
- bit_field_ = EqualityKindField::encode(kind) |
- NilValueField::encode(nil);
+ CompareNilICStub(Code::ExtraICState ic_state, InitializationState init_state)
+ : HydrogenCodeStub(init_state) {
+ equality_kind_ = EqualityKindField::decode(ic_state);
+ nil_value_ = NilValueField::decode(ic_state);
+ types_ = Types(ExtractTypesFromExtraICState(ic_state));
}
+ class EqualityKindField : public BitField<EqualityKind, NUMBER_OF_TYPES, 1> {
+ };
+ class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES+1, 1> {};
+
virtual CodeStub::Major MajorKey() { return CompareNilIC; }
- virtual int NotMissMinorKey() { return bit_field_; }
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
- int bit_field_;
+ EqualityKind equality_kind_;
+ NilValue nil_value_;
+ Types types_;
DISALLOW_COPY_AND_ASSIGN(CompareNilICStub);
};
@@ -1795,26 +1829,17 @@ class ToBooleanStub: public PlatformCodeStub {
// only has room for a single byte to hold a set of these types. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 8);
- class Types {
+ class Types : public EnumSet<Type, byte> {
public:
Types() {}
- explicit Types(byte bits) : set_(bits) {}
+ explicit Types(byte bits) : EnumSet<Type, byte>(bits) {}
- bool IsEmpty() const { return set_.IsEmpty(); }
- bool Contains(Type type) const { return set_.Contains(type); }
- bool ContainsAnyOf(Types types) const {
- return set_.ContainsAnyOf(types.set_);
- }
- void Add(Type type) { set_.Add(type); }
- byte ToByte() const { return set_.ToIntegral(); }
+ byte ToByte() const { return ToIntegral(); }
void Print(StringStream* stream) const;
void TraceTransition(Types to) const;
bool Record(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const;
-
- private:
- EnumSet<Type, byte> set_;
};
static Types no_types() { return Types(); }
@@ -1831,7 +1856,8 @@ class ToBooleanStub: public PlatformCodeStub {
private:
Major MajorKey() { return ToBoolean; }
- int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
+ int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) |
+ types_.ToByte(); }
virtual void FinishCode(Handle<Code> code) {
code->set_to_boolean_state(types_.ToByte());
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index b7ff92a7c8..504575803d 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -103,6 +103,8 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
code_stub_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
+ no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
+ ? new List<OffsetRange>(2) : NULL;
if (mode == STUB) {
mode_ = STUB;
return;
@@ -121,6 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
CompilationInfo::~CompilationInfo() {
delete deferred_handles_;
+ delete no_frame_ranges_;
}
@@ -216,9 +219,8 @@ void OptimizingCompiler::RecordOptimizationStats() {
double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
if (FLAG_trace_opt) {
- PrintF("[optimizing: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
+ PrintF("[optimizing ");
+ function->ShortPrint();
PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
ms_codegen);
}
@@ -315,15 +317,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
// Take --hydrogen-filter into account.
- Handle<String> name = info()->function()->debug_name();
- if (*FLAG_hydrogen_filter != '\0') {
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- if ((filter[0] == '-'
- && name->IsUtf8EqualTo(filter.SubVector(1, filter.length())))
- || (filter[0] != '-' && !name->IsUtf8EqualTo(filter))) {
+ if (!info()->closure()->PassesHydrogenFilter()) {
info()->SetCode(code);
return SetLastStatus(BAILED_OUT);
- }
}
// Recompile the unoptimized version of the code if the current version
@@ -360,6 +356,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
ASSERT(info()->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
+ Handle<String> name = info()->function()->debug_name();
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
isolate()->GetHTracer()->TraceCompilation(info());
@@ -574,6 +571,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
+ info,
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
@@ -586,6 +584,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
+ info,
isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}
@@ -813,6 +812,10 @@ static void InstallCodeCommon(CompilationInfo* info) {
// reset this bit when lazy compiling the code again.
if (shared->optimization_disabled()) code->set_optimizable(false);
+ if (shared->code() == *code) {
+ // Do not send compilation event for the same code twice.
+ return;
+ }
Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
}
@@ -843,9 +846,9 @@ static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
int index = shared->SearchOptimizedCodeMap(*native_context);
if (index > 0) {
if (FLAG_trace_opt) {
- PrintF("[found optimized code for: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(*function));
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ PrintF("]\n");
}
// Caching of optimized code enabled and optimized code found.
shared->InstallFromOptimizedCodeMap(*function, index);
@@ -1157,6 +1160,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
+ info,
String::cast(script->name()),
line_num));
} else {
@@ -1164,6 +1168,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
+ info,
shared->DebugName()));
}
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 00074c899b..dbb513ccdb 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -47,6 +47,12 @@ enum ParseRestriction {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
+struct OffsetRange {
+ OffsetRange(int from, int to) : from(from), to(to) {}
+ int from;
+ int to;
+};
+
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
class CompilationInfo {
@@ -257,6 +263,20 @@ class CompilationInfo {
prologue_offset_ = prologue_offset;
}
+ // Adds offset range [from, to) where fp register does not point
+ // to the current frame base. Used in CPU profiler to detect stack
+ // samples where top frame is not set up.
+ inline void AddNoFrameRange(int from, int to) {
+ if (no_frame_ranges_) no_frame_ranges_->Add(OffsetRange(from, to));
+ }
+
+ List<OffsetRange>* ReleaseNoFrameRanges() {
+ List<OffsetRange>* result = no_frame_ranges_;
+ no_frame_ranges_ = NULL;
+ return result;
+ }
+
+
private:
Isolate* isolate_;
@@ -361,6 +381,8 @@ class CompilationInfo {
int prologue_offset_;
+ List<OffsetRange>* no_frame_ranges_;
+
// A copy of shared_info()->opt_count() to avoid handle deref
// during graph optimization.
int opt_count_;
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 434b274147..86406e5a09 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -112,6 +112,7 @@ enum BindingFlags {
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
@@ -138,9 +139,6 @@ enum BindingFlags {
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_mode_function_without_prototype_map) \
- V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
- V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
- strict_mode_function_instance_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
@@ -260,9 +258,8 @@ class Context: public FixedArray {
STRICT_MODE_FUNCTION_MAP_INDEX,
FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- FUNCTION_INSTANCE_MAP_INDEX,
- STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
+ INITIAL_ARRAY_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX,
STRING_FUNCTION_INDEX,
@@ -434,6 +431,10 @@ class Context: public FixedArray {
ASSERT(IsNativeContext()); \
set(index, value); \
} \
+ bool is_##name(type* value) { \
+ ASSERT(IsNativeContext()); \
+ return type::cast(get(index)) == value; \
+ } \
type* name() { \
ASSERT(IsNativeContext()); \
return type::cast(get(index)); \
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 51d29423c4..c30d4d44f2 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -29,6 +29,7 @@
#include "cpu-profiler-inl.h"
+#include "compiler.h"
#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"
@@ -80,7 +81,8 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
int line_number,
Address start,
unsigned size,
- Address shared) {
+ Address shared,
+ CompilationInfo* info) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -88,6 +90,9 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->order = ++enqueue_order_;
rec->start = start;
rec->entry = profiles_->NewCodeEntry(tag, name, resource_name, line_number);
+ if (info) {
+ rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
+ }
rec->size = size;
rec->shared = shared;
events_buffer_.Enqueue(evt_rec);
@@ -323,6 +328,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
+ NULL,
NULL);
}
@@ -330,6 +336,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
+ CompilationInfo* info,
Name* name) {
processor_->CodeCreateEvent(
tag,
@@ -338,13 +345,15 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
- shared->address());
+ shared->address(),
+ info);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
+ CompilationInfo* info,
String* source, int line) {
processor_->CodeCreateEvent(
tag,
@@ -353,7 +362,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
line,
code->address(),
code->ExecutableSize(),
- shared->address());
+ shared->address(),
+ info);
}
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index da7ea6de24..2f8479fcca 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -40,6 +40,7 @@ namespace internal {
// Forward declarations.
class CodeEntry;
class CodeMap;
+class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
@@ -142,7 +143,8 @@ class ProfilerEventsProcessor : public Thread {
Name* name,
String* resource_name, int line_number,
Address start, unsigned size,
- Address shared);
+ Address shared,
+ CompilationInfo* info);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start, unsigned size);
@@ -227,11 +229,13 @@ class CpuProfiler {
Code* code, Name* name);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
- SharedFunctionInfo* shared,
- Name* name);
+ SharedFunctionInfo* shared,
+ CompilationInfo* info,
+ Name* name);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
+ CompilationInfo* info,
String* source, int line);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 1889556e16..b95432e269 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -89,38 +89,15 @@ static Handle<Value> Throw(const char* message) {
}
-// TODO(rossberg): should replace these by proper uses of HasInstance,
-// once we figure out a good way to make the templates global.
-const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-
-
-#define FOR_EACH_STRING(V) \
- V(ArrayBuffer, "ArrayBuffer") \
- V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \
- V(ArrayMarkerPropName, kArrayMarkerPropName) \
- V(buffer, "buffer") \
- V(byteLength, "byteLength") \
- V(byteOffset, "byteOffset") \
- V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT") \
- V(length, "length")
-
class PerIsolateData {
public:
explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
HandleScope scope(isolate);
-#define INIT_STRING(name, value) \
- name##_string_ = Persistent<String>::New(isolate, String::NewSymbol(value));
- FOR_EACH_STRING(INIT_STRING)
-#undef INIT_STRING
isolate->SetData(this);
}
~PerIsolateData() {
-#define DISPOSE_STRING(name, value) name##_string_.Dispose(isolate_);
- FOR_EACH_STRING(DISPOSE_STRING)
-#undef DISPOSE_STRING
isolate_->SetData(NULL); // Not really needed, just to be sure...
}
@@ -128,13 +105,6 @@ class PerIsolateData {
return reinterpret_cast<PerIsolateData*>(isolate->GetData());
}
-#define DEFINE_STRING_GETTER(name, value) \
- static Handle<String> name##_string(Isolate* isolate) { \
- return Handle<String>(*Get(isolate)->name##_string_); \
- }
- FOR_EACH_STRING(DEFINE_STRING_GETTER)
-#undef DEFINE_STRING_GETTER
-
class RealmScope {
public:
explicit RealmScope(PerIsolateData* data);
@@ -153,10 +123,6 @@ class PerIsolateData {
Persistent<Context>* realms_;
Persistent<Value> realm_shared_;
-#define DEFINE_MEMBER(name, value) Persistent<String> name##_string_;
- FOR_EACH_STRING(DEFINE_MEMBER)
-#undef DEFINE_MEMBER
-
int RealmFind(Handle<Context> context);
};
@@ -561,565 +527,6 @@ Handle<Value> Shell::Load(const Arguments& args) {
return Undefined(args.GetIsolate());
}
-static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) {
- if (value_in->IsInt32()) {
- return value_in->Int32Value();
- }
-
- Local<Value> number = value_in->ToNumber();
- if (try_catch->HasCaught()) return 0;
-
- ASSERT(number->IsNumber());
- Local<Int32> int32 = number->ToInt32();
- if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
-
- int32_t value = int32->Int32Value();
- if (try_catch->HasCaught()) return 0;
-
- return value;
-}
-
-
-static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
- int32_t raw_value = convertToInt(value_in, try_catch);
- if (try_catch->HasCaught()) return 0;
-
- if (raw_value < 0) {
- Throw("Array length must not be negative.");
- return 0;
- }
-
- static const int kMaxLength = 0x3fffffff;
-#ifndef V8_SHARED
- ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
-#endif // V8_SHARED
- if (raw_value > static_cast<int32_t>(kMaxLength)) {
- Throw("Array length exceeds maximum length.");
- }
- return raw_value;
-}
-
-
-Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
- Handle<Object> buffer,
- int32_t length) {
- static const int32_t kMaxSize = 0x7fffffff;
- // Make sure the total size fits into a (signed) int.
- if (length < 0 || length > kMaxSize) {
- return Throw("ArrayBuffer exceeds maximum size (2G)");
- }
- uint8_t* data = new uint8_t[length];
- if (data == NULL) {
- return Throw("Memory allocation failed");
- }
- memset(data, 0, length);
-
- buffer->SetHiddenValue(
- PerIsolateData::ArrayBufferMarkerPropName_string(isolate), True());
- Persistent<Object> persistent_array =
- Persistent<Object>::New(isolate, buffer);
- persistent_array.MakeWeak(isolate, data, ExternalArrayWeakCallback);
- persistent_array.MarkIndependent(isolate);
- isolate->AdjustAmountOfExternalAllocatedMemory(length);
-
- buffer->SetIndexedPropertiesToExternalArrayData(
- data, v8::kExternalByteArray, length);
- buffer->Set(PerIsolateData::byteLength_string(isolate),
- Int32::New(length, isolate),
- ReadOnly);
-
- return buffer;
-}
-
-
-Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
- if (!args.IsConstructCall()) {
- Handle<Value>* rec_args = new Handle<Value>[args.Length()];
- for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
- Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
- delete[] rec_args;
- return result;
- }
-
- if (args.Length() == 0) {
- return Throw("ArrayBuffer constructor must have one argument");
- }
- TryCatch try_catch;
- int32_t length = convertToUint(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- return CreateExternalArrayBuffer(args.GetIsolate(), args.This(), length);
-}
-
-
-Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
- Handle<Object> array,
- Handle<Object> buffer,
- ExternalArrayType type,
- int32_t length,
- int32_t byteLength,
- int32_t byteOffset,
- int32_t element_size) {
- ASSERT(element_size == 1 || element_size == 2 ||
- element_size == 4 || element_size == 8);
- ASSERT(byteLength == length * element_size);
-
- void* data = buffer->GetIndexedPropertiesExternalArrayData();
- ASSERT(data != NULL);
-
- array->SetIndexedPropertiesToExternalArrayData(
- static_cast<uint8_t*>(data) + byteOffset, type, length);
- array->SetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate),
- Int32::New(type, isolate));
- array->Set(PerIsolateData::byteLength_string(isolate),
- Int32::New(byteLength, isolate),
- ReadOnly);
- array->Set(PerIsolateData::byteOffset_string(isolate),
- Int32::New(byteOffset, isolate),
- ReadOnly);
- array->Set(PerIsolateData::length_string(isolate),
- Int32::New(length, isolate),
- ReadOnly);
- array->Set(PerIsolateData::BYTES_PER_ELEMENT_string(isolate),
- Int32::New(element_size, isolate));
- array->Set(PerIsolateData::buffer_string(isolate),
- buffer,
- ReadOnly);
-
- return array;
-}
-
-
-Handle<Value> Shell::CreateExternalArray(const Arguments& args,
- ExternalArrayType type,
- int32_t element_size) {
- Isolate* isolate = args.GetIsolate();
- if (!args.IsConstructCall()) {
- Handle<Value>* rec_args = new Handle<Value>[args.Length()];
- for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
- Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
- delete[] rec_args;
- return result;
- }
-
- TryCatch try_catch;
- ASSERT(element_size == 1 || element_size == 2 ||
- element_size == 4 || element_size == 8);
-
- // All of the following constructors are supported:
- // TypedArray(unsigned long length)
- // TypedArray(type[] array)
- // TypedArray(TypedArray array)
- // TypedArray(ArrayBuffer buffer,
- // optional unsigned long byteOffset,
- // optional unsigned long length)
- Handle<Object> buffer;
- int32_t length;
- int32_t byteLength;
- int32_t byteOffset;
- bool init_from_array = false;
- if (args.Length() == 0) {
- return Throw("Array constructor must have at least one argument");
- }
- if (args[0]->IsObject() &&
- !args[0]->ToObject()->GetHiddenValue(
- PerIsolateData::ArrayBufferMarkerPropName_string(isolate)).IsEmpty()) {
- // Construct from ArrayBuffer.
- buffer = args[0]->ToObject();
- int32_t bufferLength = convertToUint(
- buffer->Get(PerIsolateData::byteLength_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- byteOffset = 0;
- } else {
- byteOffset = convertToUint(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (byteOffset > bufferLength) {
- return Throw("byteOffset out of bounds");
- }
- if (byteOffset % element_size != 0) {
- return Throw("byteOffset must be multiple of element size");
- }
- }
-
- if (args.Length() < 3 || args[2]->IsUndefined()) {
- byteLength = bufferLength - byteOffset;
- length = byteLength / element_size;
- if (byteLength % element_size != 0) {
- return Throw("buffer size must be multiple of element size");
- }
- } else {
- length = convertToUint(args[2], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- byteLength = length * element_size;
- if (byteOffset + byteLength > bufferLength) {
- return Throw("length out of bounds");
- }
- }
- } else {
- if (args[0]->IsObject() &&
- args[0]->ToObject()->Has(PerIsolateData::length_string(isolate))) {
- // Construct from array.
- Local<Value> value =
- args[0]->ToObject()->Get(PerIsolateData::length_string(isolate));
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- length = convertToUint(value, &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- init_from_array = true;
- } else {
- // Construct from size.
- length = convertToUint(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
- byteLength = length * element_size;
- byteOffset = 0;
-
- Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> array_buffer =
- global->Get(PerIsolateData::ArrayBuffer_string(isolate));
- ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
- Handle<Value> buffer_args[] = { Uint32::New(byteLength, isolate) };
- Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
- 1, buffer_args);
- if (try_catch.HasCaught()) return result;
- buffer = result->ToObject();
- }
-
- Handle<Object> array =
- CreateExternalArray(isolate, args.This(), buffer, type, length,
- byteLength, byteOffset, element_size);
-
- if (init_from_array) {
- Handle<Object> init = args[0]->ToObject();
- for (int i = 0; i < length; ++i) {
- Local<Value> value = init->Get(i);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- array->Set(i, value);
- }
- }
-
- return array;
-}
-
-
-Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return Throw("'slice' invoked on non-object receiver");
- }
-
- Isolate* isolate = args.GetIsolate();
- Local<Object> self = args.This();
- Local<Value> marker = self->GetHiddenValue(
- PerIsolateData::ArrayBufferMarkerPropName_string(isolate));
- if (marker.IsEmpty()) {
- return Throw("'slice' invoked on wrong receiver type");
- }
-
- int32_t length = convertToUint(
- self->Get(PerIsolateData::byteLength_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return Throw("'slice' must have at least one argument");
- }
- int32_t begin = convertToInt(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (begin < 0) begin += length;
- if (begin < 0) begin = 0;
- if (begin > length) begin = length;
-
- int32_t end;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- end = length;
- } else {
- end = convertToInt(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (end < 0) end += length;
- if (end < 0) end = 0;
- if (end > length) end = length;
- if (end < begin) end = begin;
- }
-
- Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
- Handle<Value> new_args[] = { Uint32::New(end - begin, isolate) };
- Handle<Value> result = constructor->NewInstance(1, new_args);
- if (try_catch.HasCaught()) return result;
- Handle<Object> buffer = result->ToObject();
- uint8_t* dest =
- static_cast<uint8_t*>(buffer->GetIndexedPropertiesExternalArrayData());
- uint8_t* src = begin + static_cast<uint8_t*>(
- self->GetIndexedPropertiesExternalArrayData());
- memcpy(dest, src, end - begin);
-
- return buffer;
-}
-
-
-Handle<Value> Shell::ArraySubArray(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return Throw("'subarray' invoked on non-object receiver");
- }
-
- Isolate* isolate = args.GetIsolate();
- Local<Object> self = args.This();
- Local<Value> marker =
- self->GetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate));
- if (marker.IsEmpty()) {
- return Throw("'subarray' invoked on wrong receiver type");
- }
-
- Handle<Object> buffer =
- self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t length = convertToUint(
- self->Get(PerIsolateData::length_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t byteOffset = convertToUint(
- self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t element_size = convertToUint(
- self->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return Throw("'subarray' must have at least one argument");
- }
- int32_t begin = convertToInt(args[0], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (begin < 0) begin += length;
- if (begin < 0) begin = 0;
- if (begin > length) begin = length;
-
- int32_t end;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- end = length;
- } else {
- end = convertToInt(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- if (end < 0) end += length;
- if (end < 0) end = 0;
- if (end > length) end = length;
- if (end < begin) end = begin;
- }
-
- length = end - begin;
- byteOffset += begin * element_size;
-
- Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
- Handle<Value> construct_args[] = {
- buffer, Uint32::New(byteOffset, isolate), Uint32::New(length, isolate)
- };
- return constructor->NewInstance(3, construct_args);
-}
-
-
-Handle<Value> Shell::ArraySet(const Arguments& args) {
- TryCatch try_catch;
-
- if (!args.This()->IsObject()) {
- return Throw("'set' invoked on non-object receiver");
- }
-
- Isolate* isolate = args.GetIsolate();
- Local<Object> self = args.This();
- Local<Value> marker =
- self->GetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate));
- if (marker.IsEmpty()) {
- return Throw("'set' invoked on wrong receiver type");
- }
- int32_t length = convertToUint(
- self->Get(PerIsolateData::length_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t element_size = convertToUint(
- self->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (args.Length() == 0) {
- return Throw("'set' must have at least one argument");
- }
- if (!args[0]->IsObject() ||
- !args[0]->ToObject()->Has(PerIsolateData::length_string(isolate))) {
- return Throw("'set' invoked with non-array argument");
- }
- Handle<Object> source = args[0]->ToObject();
- int32_t source_length = convertToUint(
- source->Get(PerIsolateData::length_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- int32_t offset;
- if (args.Length() < 2 || args[1]->IsUndefined()) {
- offset = 0;
- } else {
- offset = convertToUint(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
- if (offset + source_length > length) {
- return Throw("offset or source length out of bounds");
- }
-
- int32_t source_element_size;
- if (source->GetHiddenValue(
- PerIsolateData::ArrayMarkerPropName_string(isolate)).IsEmpty()) {
- source_element_size = 0;
- } else {
- source_element_size = convertToUint(
- source->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)),
- &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
-
- if (element_size == source_element_size &&
- self->GetConstructor()->StrictEquals(source->GetConstructor())) {
- // Use memmove on the array buffers.
- Handle<Object> buffer =
- self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- Handle<Object> source_buffer =
- source->Get(PerIsolateData::buffer_string(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t byteOffset = convertToUint(
- self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t source_byteOffset = convertToUint(
- source->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
- buffer->GetIndexedPropertiesExternalArrayData());
- uint8_t* src = source_byteOffset + static_cast<uint8_t*>(
- source_buffer->GetIndexedPropertiesExternalArrayData());
- memmove(dest, src, source_length * element_size);
- } else if (source_element_size == 0) {
- // Source is not a typed array, copy element-wise sequentially.
- for (int i = 0; i < source_length; ++i) {
- self->Set(offset + i, source->Get(i));
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- }
- } else {
- // Need to copy element-wise to make the right conversions.
- Handle<Object> buffer =
- self->Get(PerIsolateData::buffer_string(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- Handle<Object> source_buffer =
- source->Get(PerIsolateData::buffer_string(isolate))->ToObject();
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- if (buffer->StrictEquals(source_buffer)) {
- // Same backing store, need to handle overlap correctly.
- // This gets a bit tricky in the case of different element sizes
- // (which, of course, is extremely unlikely to ever occur in practice).
- int32_t byteOffset = convertToUint(
- self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
- int32_t source_byteOffset = convertToUint(
- source->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch);
- if (try_catch.HasCaught()) return try_catch.ReThrow();
-
- // Copy as much as we can from left to right.
- int i = 0;
- int32_t next_dest_offset = byteOffset + (offset + 1) * element_size;
- int32_t next_src_offset = source_byteOffset + source_element_size;
- while (i < length && next_dest_offset <= next_src_offset) {
- self->Set(offset + i, source->Get(i));
- ++i;
- next_dest_offset += element_size;
- next_src_offset += source_element_size;
- }
- // Of what's left, copy as much as we can from right to left.
- int j = length - 1;
- int32_t dest_offset = byteOffset + (offset + j) * element_size;
- int32_t src_offset = source_byteOffset + j * source_element_size;
- while (j >= i && dest_offset >= src_offset) {
- self->Set(offset + j, source->Get(j));
- --j;
- dest_offset -= element_size;
- src_offset -= source_element_size;
- }
- // There can be at most 8 entries left in the middle that need buffering
- // (because the largest element_size is 8 times the smallest).
- ASSERT(j+1 - i <= 8);
- Handle<Value> temp[8];
- for (int k = i; k <= j; ++k) {
- temp[k - i] = source->Get(k);
- }
- for (int k = i; k <= j; ++k) {
- self->Set(offset + k, temp[k - i]);
- }
- } else {
- // Different backing stores, safe to copy element-wise sequentially.
- for (int i = 0; i < source_length; ++i)
- self->Set(offset + i, source->Get(i));
- }
- }
-
- return Undefined(args.GetIsolate());
-}
-
-
-void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate,
- Persistent<Object>* object,
- uint8_t* data) {
- HandleScope scope(isolate);
- int32_t length = (*object)->Get(
- PerIsolateData::byteLength_string(isolate))->Uint32Value();
- isolate->AdjustAmountOfExternalAllocatedMemory(-length);
- delete[] data;
- object->Dispose(isolate);
-}
-
-
-Handle<Value> Shell::Int8Array(const Arguments& args) {
- return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
-}
-
-
-Handle<Value> Shell::Uint8Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalUnsignedByteArray, sizeof(uint8_t));
-}
-
-
-Handle<Value> Shell::Int16Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalShortArray, sizeof(int16_t));
-}
-
-
-Handle<Value> Shell::Uint16Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalUnsignedShortArray, sizeof(uint16_t));
-}
-
-
-Handle<Value> Shell::Int32Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalIntArray, sizeof(int32_t));
-}
-
-
-Handle<Value> Shell::Uint32Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalUnsignedIntArray, sizeof(uint32_t));
-}
-
-
-Handle<Value> Shell::Float32Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalFloatArray, sizeof(float)); // NOLINT
-}
-
-
-Handle<Value> Shell::Float64Array(const Arguments& args) {
- return CreateExternalArray(
- args, kExternalDoubleArray, sizeof(double)); // NOLINT
-}
-
-
-Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
- return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
-}
-
Handle<Value> Shell::Quit(const Arguments& args) {
int exit_code = args[0]->Int32Value();
@@ -1412,26 +819,6 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
#endif
-Handle<FunctionTemplate> Shell::CreateArrayBufferTemplate(
- InvocationCallback fun) {
- Handle<FunctionTemplate> buffer_template = FunctionTemplate::New(fun);
- Local<Template> proto_template = buffer_template->PrototypeTemplate();
- proto_template->Set(String::New("slice"),
- FunctionTemplate::New(ArrayBufferSlice));
- return buffer_template;
-}
-
-
-Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
- Handle<FunctionTemplate> array_template = FunctionTemplate::New(fun);
- Local<Template> proto_template = array_template->PrototypeTemplate();
- proto_template->Set(String::New("set"), FunctionTemplate::New(ArraySet));
- proto_template->Set(String::New("subarray"),
- FunctionTemplate::New(ArraySubArray));
- return array_template;
-}
-
-
Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
@@ -1469,36 +856,6 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
RealmSharedGet, RealmSharedSet);
global_template->Set(String::New("Realm"), realm_template);
- // Bind the handlers for external arrays.
-#ifndef V8_SHARED
- if (!i::FLAG_harmony_typed_arrays) {
-#endif // V8_SHARED
- PropertyAttribute attr =
- static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(PerIsolateData::ArrayBuffer_string(isolate),
- CreateArrayBufferTemplate(ArrayBuffer), attr);
- global_template->Set(String::New("Int8Array"),
- CreateArrayTemplate(Int8Array), attr);
- global_template->Set(String::New("Uint8Array"),
- CreateArrayTemplate(Uint8Array), attr);
- global_template->Set(String::New("Int16Array"),
- CreateArrayTemplate(Int16Array), attr);
- global_template->Set(String::New("Uint16Array"),
- CreateArrayTemplate(Uint16Array), attr);
- global_template->Set(String::New("Int32Array"),
- CreateArrayTemplate(Int32Array), attr);
- global_template->Set(String::New("Uint32Array"),
- CreateArrayTemplate(Uint32Array), attr);
- global_template->Set(String::New("Float32Array"),
- CreateArrayTemplate(Float32Array), attr);
- global_template->Set(String::New("Float64Array"),
- CreateArrayTemplate(Float64Array), attr);
- global_template->Set(String::New("Uint8ClampedArray"),
- CreateArrayTemplate(Uint8ClampedArray), attr);
-#ifndef V8_SHARED
- }
-#endif // V8_SHARED
-
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
AddOSMethods(os_templ);
@@ -1705,20 +1062,9 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
if (data == NULL) {
return Throw("Error reading file");
}
- Isolate* isolate = args.GetIsolate();
- Handle<Object> buffer = Object::New();
- buffer->SetHiddenValue(
- PerIsolateData::ArrayBufferMarkerPropName_string(isolate), True());
- Persistent<Object> persistent_buffer =
- Persistent<Object>::New(isolate, buffer);
- persistent_buffer.MakeWeak(isolate, data, ExternalArrayWeakCallback);
- persistent_buffer.MarkIndependent(isolate);
- isolate->AdjustAmountOfExternalAllocatedMemory(length);
-
- buffer->SetIndexedPropertiesToExternalArrayData(
- data, kExternalUnsignedByteArray, length);
- buffer->Set(PerIsolateData::byteLength_string(isolate),
- Int32::New(static_cast<int32_t>(length), isolate), ReadOnly);
+ Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(length);
+ memcpy(buffer->Data(), data, length);
+ delete[] data;
return buffer;
}
@@ -2167,8 +1513,27 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
}
+#ifdef V8_SHARED
+static void EnableHarmonyTypedArraysViaCommandLine() {
+ int fake_argc = 2;
+ char **fake_argv = new char*[2];
+ fake_argv[0] = NULL;
+ fake_argv[1] = strdup("--harmony-typed-arrays");
+ v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
+ free(fake_argv[1]);
+ delete[] fake_argv;
+}
+#endif
+
+
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
+#ifndef V8_SHARED
+ i::FLAG_harmony_array_buffer = true;
+ i::FLAG_harmony_typed_arrays = true;
+#else
+ EnableHarmonyTypedArraysViaCommandLine();
+#endif
int result = 0;
Isolate* isolate = Isolate::GetCurrent();
DumbLineEditor dumb_line_editor(isolate);
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 92a2af23e4..26410e9de5 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -50,22 +50,23 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
: allocator_(allocator),
- eager_deoptimization_entry_code_entries_(-1),
- lazy_deoptimization_entry_code_entries_(-1),
- eager_deoptimization_entry_code_(AllocateCodeChunk(allocator)),
- lazy_deoptimization_entry_code_(AllocateCodeChunk(allocator)),
current_(NULL),
#ifdef ENABLE_DEBUGGER_SUPPORT
deoptimized_frame_info_(NULL),
#endif
- deoptimizing_code_list_(NULL) { }
+ deoptimizing_code_list_(NULL) {
+ for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
+ deopt_entry_code_entries_[i] = -1;
+ deopt_entry_code_[i] = AllocateCodeChunk(allocator);
+ }
+}
DeoptimizerData::~DeoptimizerData() {
- allocator_->Free(eager_deoptimization_entry_code_);
- eager_deoptimization_entry_code_ = NULL;
- allocator_->Free(lazy_deoptimization_entry_code_);
- lazy_deoptimization_entry_code_ = NULL;
+ for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
+ allocator_->Free(deopt_entry_code_[i]);
+ deopt_entry_code_[i] = NULL;
+ }
DeoptimizingCodeListNode* current = deoptimizing_code_list_;
while (current != NULL) {
@@ -488,6 +489,7 @@ bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
StackFrame::Type frame_type) {
switch (deopt_type) {
case EAGER:
+ case SOFT:
case LAZY:
case DEBUGGER:
return (frame_type == StackFrame::STUB)
@@ -503,13 +505,11 @@ bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
const char* Deoptimizer::MessageFor(BailoutType type) {
switch (type) {
- case EAGER:
- case LAZY:
- return "DEOPT";
- case DEBUGGER:
- return "DEOPT FOR DEBUGGER";
- case OSR:
- return "OSR";
+ case EAGER: return "eager";
+ case SOFT: return "soft";
+ case LAZY: return "lazy";
+ case DEBUGGER: return "debugger";
+ case OSR: return "OSR";
}
UNREACHABLE();
return NULL;
@@ -545,13 +545,19 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
}
if (function != NULL && function->IsOptimized()) {
function->shared()->increment_deopt_count();
+ if (bailout_type_ == Deoptimizer::SOFT) {
+ // Soft deopts shouldn't count against the overall re-optimization count
+ // that can eventually lead to disabling optimization for a function.
+ int opt_count = function->shared()->opt_count();
+ if (opt_count > 0) opt_count--;
+ function->shared()->set_opt_count(opt_count);
+ }
}
compiled_code_ = FindOptimizedCode(function, optimized_code);
StackFrame::Type frame_type = function == NULL
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
trace_ = TraceEnabledFor(type, frame_type);
- if (trace_) Trace();
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
@@ -562,6 +568,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
Code* optimized_code) {
switch (bailout_type_) {
+ case Deoptimizer::SOFT:
case Deoptimizer::EAGER:
ASSERT(from_ == NULL);
return function->code();
@@ -590,17 +597,6 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
}
-void Deoptimizer::Trace() {
- PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_));
- PrintFunctionName();
- PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
- bailout_id_,
- reinterpret_cast<intptr_t>(from_),
- fp_to_sp_delta_ - (2 * kPointerSize));
- if (bailout_type_ == EAGER) compiled_code_->PrintDeoptLocation(bailout_id_);
-}
-
-
void Deoptimizer::PrintFunctionName() {
if (function_->IsJSFunction()) {
function_->PrintName();
@@ -639,9 +635,8 @@ Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
}
DeoptimizerData* data = isolate->deoptimizer_data();
- MemoryChunk* base = (type == EAGER)
- ? data->eager_deoptimization_entry_code_
- : data->lazy_deoptimization_entry_code_;
+ ASSERT(type < kBailoutTypesWithCodeEntry);
+ MemoryChunk* base = data->deopt_entry_code_[type];
return base->area_start() + (id * table_entry_size_);
}
@@ -650,9 +645,7 @@ int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
Address addr,
BailoutType type) {
DeoptimizerData* data = isolate->deoptimizer_data();
- MemoryChunk* base = (type == EAGER)
- ? data->eager_deoptimization_entry_code_
- : data->lazy_deoptimization_entry_code_;
+ MemoryChunk* base = data->deopt_entry_code_[type];
Address start = base->area_start();
if (base == NULL ||
addr < start ||
@@ -713,11 +706,14 @@ void Deoptimizer::DoComputeOutputFrames() {
// Print some helpful diagnostic information.
int64_t start = OS::Ticks();
if (trace_) {
- PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
- (bailout_type_ == LAZY ? " (lazy)" : ""),
+ PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
+ MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
- PrintF(" @%d]\n", bailout_id_);
+ PrintF(" @%d, FP to SP delta: %d]\n", bailout_id_, fp_to_sp_delta_);
+ if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
+ compiled_code_->PrintDeoptLocation(bailout_id_);
+ }
}
// Determine basic deoptimization information. The optimized frame is
@@ -794,11 +790,13 @@ void Deoptimizer::DoComputeOutputFrames() {
double ms = static_cast<double>(OS::Ticks() - start) / 1000;
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
- PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
+ PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
+ MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function));
- if (function != NULL) function->PrintName();
- PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+ PrintFunctionName();
+ PrintF(" @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
" took %0.3f ms]\n",
+ bailout_id_,
node_id.ToInt(),
output_[index]->GetPc(),
FullCodeGenerator::State2String(
@@ -810,6 +808,193 @@ void Deoptimizer::DoComputeOutputFrames() {
}
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (trace_) {
+ PrintF(" translating ");
+ function->PrintName();
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by JavaScriptFrameConstants.
+ unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ ASSERT(frame_index >= 0 && frame_index < output_count_);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ Register fp_reg = JavaScriptFrame::fp_register();
+ intptr_t top_address;
+ if (is_bottommost) {
+ // Determine whether the input frame contains alignment padding.
+ has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
+ // 2 = context and function in the frame.
+ // If the optimized frame had alignment padding, adjust the frame pointer
+ // to point to the new position of the old frame pointer after padding
+ // is removed. Subtract 2 * kPointerSize for the context and function slots.
+ top_address = input_->GetRegister(fp_reg.code()) - (2 * kPointerSize) -
+ height_in_bytes + has_alignment_padding_ * kPointerSize;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Synthesize their values and set them up
+ // explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and AST id of the bailout.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
+ has_alignment_padding_ * kPointerSize) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+ ASSERT(!is_bottommost || !has_alignment_padding_ ||
+ (fp_value & kPointerSize) != 0);
+
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
+ Register context_reg = JavaScriptFrame::context_register();
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetContext(value);
+ if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The function was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Translate the rest of the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ ASSERT(0 == output_offset);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* non_optimized_code = function->shared()->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ Address start = non_optimized_code->instruction_start();
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ FullCodeGenerator::State state =
+ FullCodeGenerator::StateField::decode(pc_and_state);
+ output_frame->SetState(Smi::FromInt(state));
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost && bailout_type_ != DEBUGGER) {
+ Builtins* builtins = isolate_->builtins();
+ Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
+ if (bailout_type_ == LAZY) {
+ continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+ } else if (bailout_type_ == SOFT) {
+ continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
+ } else {
+ ASSERT(bailout_type_ == EAGER);
+ }
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+}
+
+
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index) {
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
@@ -2206,11 +2391,9 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
- ASSERT(type == EAGER || type == LAZY);
+ ASSERT(type == EAGER || type == SOFT || type == LAZY);
DeoptimizerData* data = isolate->deoptimizer_data();
- int entry_count = (type == EAGER)
- ? data->eager_deoptimization_entry_code_entries_
- : data->lazy_deoptimization_entry_code_entries_;
+ int entry_count = data->deopt_entry_code_entries_[type];
if (max_entry_id < entry_count) return;
entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
while (max_entry_id >= entry_count) entry_count *= 2;
@@ -2223,9 +2406,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- MemoryChunk* chunk = (type == EAGER)
- ? data->eager_deoptimization_entry_code_
- : data->lazy_deoptimization_entry_code_;
+ MemoryChunk* chunk = data->deopt_entry_code_[type];
ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
desc.instr_size);
chunk->CommitArea(desc.instr_size);
@@ -2233,11 +2414,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
static_cast<size_t>(desc.instr_size));
CPU::FlushICache(chunk->area_start(), desc.instr_size);
- if (type == EAGER) {
- data->eager_deoptimization_entry_code_entries_ = entry_count;
- } else {
- data->lazy_deoptimization_entry_code_entries_ = entry_count;
- }
+ data->deopt_entry_code_entries_[type] = entry_count;
}
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index edf6c504f1..c1b3a9d25e 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -98,53 +98,34 @@ class OptimizedFunctionFilter BASE_EMBEDDED {
class Deoptimizer;
-class DeoptimizerData {
- public:
- explicit DeoptimizerData(MemoryAllocator* allocator);
- ~DeoptimizerData();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void Iterate(ObjectVisitor* v);
-#endif
-
- Code* FindDeoptimizingCode(Address addr);
- void RemoveDeoptimizingCode(Code* code);
-
- private:
- MemoryAllocator* allocator_;
- int eager_deoptimization_entry_code_entries_;
- int lazy_deoptimization_entry_code_entries_;
- MemoryChunk* eager_deoptimization_entry_code_;
- MemoryChunk* lazy_deoptimization_entry_code_;
- Deoptimizer* current_;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- DeoptimizedFrameInfo* deoptimized_frame_info_;
-#endif
-
- // List of deoptimized code which still have references from active stack
- // frames. These code objects are needed by the deoptimizer when deoptimizing
- // a frame for which the code object for the function function has been
- // changed from the code present when deoptimizing was done.
- DeoptimizingCodeListNode* deoptimizing_code_list_;
-
- friend class Deoptimizer;
-
- DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
-};
-
-
class Deoptimizer : public Malloced {
public:
enum BailoutType {
EAGER,
LAZY,
+ SOFT,
OSR,
// This last bailout type is not really a bailout, but used by the
// debugger to deoptimize stack frames to allow inspection.
DEBUGGER
};
+ static const int kBailoutTypesWithCodeEntry = SOFT + 1;
+
+ struct JumpTableEntry {
+ inline JumpTableEntry(Address entry,
+ Deoptimizer::BailoutType type,
+ bool frame)
+ : label(),
+ address(entry),
+ bailout_type(type),
+ needs_frame(frame) { }
+ Label label;
+ Address address;
+ Deoptimizer::BailoutType bailout_type;
+ bool needs_frame;
+ };
+
static bool TraceEnabledFor(BailoutType deopt_type,
StackFrame::Type frame_type);
static const char* MessageFor(BailoutType type);
@@ -354,7 +335,6 @@ class Deoptimizer : public Malloced {
int fp_to_sp_delta,
Code* optimized_code);
Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
- void Trace();
void PrintFunctionName();
void DeleteFrameDescriptions();
@@ -426,6 +406,10 @@ class Deoptimizer : public Malloced {
// from the input frame's double registers.
void CopyDoubleRegisters(FrameDescription* output_frame);
+ // Determines whether the input frame contains alignment padding by looking
+ // at the dynamic alignment state slot inside the frame.
+ bool HasAlignmentPadding(JSFunction* function);
+
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
@@ -626,6 +610,40 @@ class FrameDescription {
};
+class DeoptimizerData {
+ public:
+ explicit DeoptimizerData(MemoryAllocator* allocator);
+ ~DeoptimizerData();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void Iterate(ObjectVisitor* v);
+#endif
+
+ Code* FindDeoptimizingCode(Address addr);
+ void RemoveDeoptimizingCode(Code* code);
+
+ private:
+ MemoryAllocator* allocator_;
+ int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
+ MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
+ Deoptimizer* current_;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ DeoptimizedFrameInfo* deoptimized_frame_info_;
+#endif
+
+ // List of deoptimized code which still have references from active stack
+ // frames. These code objects are needed by the deoptimizer when deoptimizing
+ // a frame for which the code object for the function function has been
+ // changed from the code present when deoptimizing was done.
+ DeoptimizingCodeListNode* deoptimizing_code_list_;
+
+ friend class Deoptimizer;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
+};
+
+
class TranslationBuffer BASE_EMBEDDED {
public:
explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index bbb1fb8d8e..e41734babd 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -293,7 +293,14 @@ static int DecodeIt(Isolate* isolate,
addr,
Deoptimizer::LAZY);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ id = Deoptimizer::GetDeoptimizationId(isolate,
+ addr,
+ Deoptimizer::SOFT);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ } else {
+ out.AddFormatted(" ;; soft deoptimization bailout %d", id);
+ }
} else {
out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
}
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 76d20303f8..0f6b639fa1 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -63,10 +63,10 @@ const char* const ExternalizeStringExtension::kSource =
v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
v8::Handle<v8::String> str) {
- if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) {
+ if (strcmp(*v8::String::Utf8Value(str), "externalizeString") == 0) {
return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
} else {
- ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0);
+ ASSERT(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0);
return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
}
}
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 7ae090c987..71b70a533d 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -36,7 +36,7 @@ const char* const StatisticsExtension::kSource =
v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
v8::Handle<v8::String> str) {
- ASSERT(strcmp(*v8::String::AsciiValue(str), "getV8Statistics") == 0);
+ ASSERT(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 5e8970835b..233b3b01c8 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -266,10 +266,12 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromUint(uint32_t value,
PretenureFlag pretenure = NOT_TENURED);
-
+ inline Handle<Object> NewNumberFromSize(size_t value,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<HeapNumber> NewHeapNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
+
// These objects are used by the api to create env-independent data
// structures in the heap.
Handle<JSObject> NewNeanderObject();
@@ -539,6 +541,18 @@ class Factory {
};
+Handle<Object> Factory::NewNumberFromSize(size_t value,
+ PretenureFlag pretenure) {
+ if (Smi::IsValid(static_cast<intptr_t>(value))) {
+ return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
+ isolate());
+ } else {
+ return NewNumber(static_cast<double>(value), pretenure);
+ }
+}
+
+
+
} } // namespace v8::internal
#endif // V8_FACTORY_H_
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index d5d58a7fac..58f29b4d9c 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -193,9 +193,11 @@ DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
-DEFINE_bool(track_fields, false, "track fields with only smi values")
-DEFINE_bool(track_double_fields, false, "track fields with double values")
+DEFINE_bool(track_fields, true, "track fields with only smi values")
+DEFINE_bool(track_double_fields, true, "track fields with double values")
+DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
DEFINE_implication(track_double_fields, track_fields)
+DEFINE_implication(track_heap_object_fields, track_fields)
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -205,7 +207,6 @@ DEFINE_bool(string_slices, true, "use string slices")
DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "", "optimization filter")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
-DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
@@ -267,6 +268,8 @@ DEFINE_bool(lookup_sample_by_shared, true,
"info, not JSFunction itself")
DEFINE_bool(cache_optimized_code, true,
"cache optimized code for closures")
+DEFINE_bool(flush_optimized_code_cache, true,
+ "flushes the cache of optimized code for closures on every GC")
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
@@ -463,6 +466,7 @@ DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
DEFINE_bool(flush_code_incrementally, true,
"flush code that we expect not to use again (incrementally)")
+DEFINE_bool(trace_code_flushing, false, "trace code flushing progress")
DEFINE_bool(age_code, true,
"track un-executed functions to age code and flush only "
"old code")
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 3c44f5e5b4..f09c24a3fb 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -584,6 +584,10 @@ class JavaScriptFrame: public StandardFrame {
// Build a list with summaries for this frame including all inlined frames.
virtual void Summarize(List<FrameSummary>* frames);
+ // Architecture-specific register description.
+ static Register fp_register();
+ static Register context_register();
+
static JavaScriptFrame* cast(StackFrame* frame) {
ASSERT(frame->is_java_script());
return static_cast<JavaScriptFrame*>(frame);
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 5a5773ebd0..7a8d5c90bc 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -545,19 +545,14 @@ int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
}
-void CustomArguments::IterateInstance(ObjectVisitor* v) {
- v->VisitPointers(values_, values_ + ARRAY_SIZE(values_));
-}
-
-
// Compute the property keys from the interceptor.
// TODO(rossberg): support symbols in API, and filter here if needed.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- CustomArguments args(isolate, interceptor->data(), *receiver, *object);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::NamedPropertyEnumerator enum_fun =
@@ -566,7 +561,7 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = enum_fun(info);
+ result = args.Call(enum_fun);
}
}
#if ENABLE_EXTRA_CHECKS
@@ -581,8 +576,8 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- CustomArguments args(isolate, interceptor->data(), *receiver, *object);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::IndexedPropertyEnumerator enum_fun =
@@ -591,7 +586,7 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = enum_fun(info);
+ result = args.Call(enum_fun);
#if ENABLE_EXTRA_CHECKS
CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 6139080bfa..98844f05e9 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -120,7 +120,6 @@ Heap::Heap()
new_space_high_promotion_mode_active_(false),
old_gen_promotion_limit_(kMinimumPromotionLimit),
old_gen_allocation_limit_(kMinimumAllocationLimit),
- old_gen_limit_factor_(1),
size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
@@ -912,26 +911,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
sweep_generation_++;
- bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
- IsStableOrIncreasingSurvivalTrend();
UpdateSurvivalRateTrend(start_new_space_size);
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
- if (high_survival_rate_during_scavenges &&
- IsStableOrIncreasingSurvivalTrend()) {
- // Stable high survival rates of young objects both during partial and
- // full collection indicate that mutator is either building or modifying
- // a structure with a long lifetime.
- // In this case we aggressively raise old generation memory limits to
- // postpone subsequent mark-sweep collection and thus trade memory
- // space for the mutation speed.
- old_gen_limit_factor_ = 2;
- } else {
- old_gen_limit_factor_ = 1;
- }
-
old_gen_promotion_limit_ =
OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_allocation_limit_ =
@@ -2517,6 +2501,54 @@ bool Heap::CreateInitialMaps() {
}
set_external_double_array_map(Map::cast(obj));
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_byte_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj =
+ AllocateEmptyExternalArray(kExternalUnsignedByteArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_short_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
+ kExternalUnsignedShortArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_int_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj =
+ AllocateEmptyExternalArray(kExternalUnsignedIntArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_float_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_double_array(ExternalArray::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_external_pixel_array(ExternalArray::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3248,6 +3280,40 @@ Heap::RootListIndex Heap::RootIndexForExternalArrayType(
}
}
+Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
+ ElementsKind elementsKind) {
+ switch (elementsKind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ return kEmptyExternalByteArrayRootIndex;
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ return kEmptyExternalUnsignedByteArrayRootIndex;
+ case EXTERNAL_SHORT_ELEMENTS:
+ return kEmptyExternalShortArrayRootIndex;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ return kEmptyExternalUnsignedShortArrayRootIndex;
+ case EXTERNAL_INT_ELEMENTS:
+ return kEmptyExternalIntArrayRootIndex;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ return kEmptyExternalUnsignedIntArrayRootIndex;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ return kEmptyExternalFloatArrayRootIndex;
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ return kEmptyExternalDoubleArrayRootIndex;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ return kEmptyExternalPixelArrayRootIndex;
+ default:
+ UNREACHABLE();
+ return kUndefinedValueRootIndex;
+ }
+}
+
+ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
+ return ExternalArray::cast(
+ roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
+}
+
+
+
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
// We need to distinguish the minus zero value and this cannot be
@@ -3291,7 +3357,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
- share->ClearOptimizedCodeMap();
+ share->set_optimized_code_map(Smi::FromInt(0));
share->set_scope_info(ScopeInfo::Empty(isolate_));
Code* construct_stub =
isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
@@ -3982,13 +4048,11 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
Context* native_context = function->context()->native_context();
- bool needs_constructor_property;
Map* new_map;
if (function->shared()->is_generator()) {
// Generator prototypes can share maps since they don't have "constructor"
// properties.
new_map = native_context->generator_object_prototype_map();
- needs_constructor_property = false;
} else {
// Each function prototype gets a fresh map to avoid unwanted sharing of
// maps between prototypes of different constructors.
@@ -3996,14 +4060,13 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
ASSERT(object_function->has_initial_map());
MaybeObject* maybe_map = object_function->initial_map()->Copy();
if (!maybe_map->To(&new_map)) return maybe_map;
- needs_constructor_property = true;
}
Object* prototype;
MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- if (needs_constructor_property) {
+ if (!function->shared()->is_generator()) {
MaybeObject* maybe_failure =
JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
constructor_string(), function, DONT_ENUM);
@@ -4143,7 +4206,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// the inline_new flag so we only change the map if we generate a
// specialized construct stub.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (instance_type == JS_OBJECT_TYPE &&
+ if (!fun->shared()->is_generator() &&
fun->shared()->CanGenerateInlineConstructor(prototype)) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
@@ -4179,7 +4242,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
}
}
- if (instance_type == JS_OBJECT_TYPE) {
+ if (!fun->shared()->is_generator()) {
fun->shared()->StartInobjectSlackTracking(map);
}
@@ -4252,7 +4315,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
+ ASSERT(JSObject::cast(obj)->HasFastElements() ||
+ JSObject::cast(obj)->HasExternalArrayElements());
return obj;
}
@@ -5177,15 +5241,8 @@ MaybeObject* Heap::AllocateJSArray(
Context* native_context = isolate()->context()->native_context();
JSFunction* array_function = native_context->array_function();
Map* map = array_function->initial_map();
- Object* maybe_map_array = native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
- }
-
+ Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
+ if (transition_map != NULL) map = transition_map;
return AllocateJSObjectFromMap(map, pretenure);
}
@@ -5223,6 +5280,10 @@ MaybeObject* Heap::AllocateEmptyFixedArray() {
return result;
}
+MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
+ return AllocateExternalArray(0, array_type, NULL, TENURED);
+}
+
MaybeObject* Heap::AllocateRawFixedArray(int length) {
if (length < 0 || length > FixedArray::kMaxLength) {
@@ -5872,7 +5933,7 @@ bool Heap::IdleGlobalGC() {
void Heap::Print() {
if (!HasBeenSetUp()) return;
- isolate()->PrintStack();
+ isolate()->PrintStack(stdout);
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
space->Print();
@@ -5902,7 +5963,6 @@ void Heap::ReportHeapStatistics(const char* title) {
old_gen_promotion_limit_);
PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_allocation_limit_);
- PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index add42c01d6..b24b0b3608 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -128,7 +128,6 @@ namespace internal {
V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
- V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
V(Map, external_byte_array_map, ExternalByteArrayMap) \
V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
V(Map, external_short_array_map, ExternalShortArrayMap) \
@@ -137,6 +136,21 @@ namespace internal {
V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
V(Map, external_float_array_map, ExternalFloatArrayMap) \
V(Map, external_double_array_map, ExternalDoubleArrayMap) \
+ V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
+ V(ExternalArray, empty_external_byte_array, \
+ EmptyExternalByteArray) \
+ V(ExternalArray, empty_external_unsigned_byte_array, \
+ EmptyExternalUnsignedByteArray) \
+ V(ExternalArray, empty_external_short_array, EmptyExternalShortArray) \
+ V(ExternalArray, empty_external_unsigned_short_array, \
+ EmptyExternalUnsignedShortArray) \
+ V(ExternalArray, empty_external_int_array, EmptyExternalIntArray) \
+ V(ExternalArray, empty_external_unsigned_int_array, \
+ EmptyExternalUnsignedIntArray) \
+ V(ExternalArray, empty_external_float_array, EmptyExternalFloatArray) \
+ V(ExternalArray, empty_external_double_array, EmptyExternalDoubleArray) \
+ V(ExternalArray, empty_external_pixel_array, \
+ EmptyExternalPixelArray) \
V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
@@ -273,7 +287,11 @@ namespace internal {
V(minus_infinity_string, "-Infinity") \
V(hidden_stack_trace_string, "v8::hidden_stack_trace") \
V(query_colon_string, "(?:)") \
- V(Generator_string, "Generator")
+ V(Generator_string, "Generator") \
+ V(send_string, "send") \
+ V(throw_string, "throw") \
+ V(done_string, "done") \
+ V(value_string, "value")
// Forward declarations.
class GCTracer;
@@ -1567,7 +1585,11 @@ class Heap {
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
limit += new_space_.Capacity();
- limit *= old_gen_limit_factor_;
+ // TODO(hpayer): Can be removed when when pretenuring is supported for all
+ // allocation sites.
+ if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
+ limit *= 2;
+ }
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
@@ -1578,7 +1600,11 @@ class Heap {
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
limit += new_space_.Capacity();
- limit *= old_gen_limit_factor_;
+ // TODO(hpayer): Can be removed when when pretenuring is supported for all
+ // allocation sites.
+ if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
+ limit *= 2;
+ }
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
@@ -1626,6 +1652,9 @@ class Heap {
RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);
+ RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
+ ExternalArray* EmptyExternalArrayForMap(Map* map);
+
void RecordStats(HeapStats* stats, bool take_snapshot = false);
// Copy block of memory from src to dst. Size of block should be aligned
@@ -1998,10 +2027,6 @@ class Heap {
// every allocation in large object space.
intptr_t old_gen_allocation_limit_;
- // Sometimes the heuristics dictate that those limits are increased. This
- // variable records that fact.
- int old_gen_limit_factor_;
-
// Used to adjust the limits that control the timing of the next GC.
intptr_t size_of_old_gen_at_last_old_space_gc_;
@@ -2140,6 +2165,10 @@ class Heap {
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
+ // Allocate empty external array of given type.
+ MUST_USE_RESULT MaybeObject* AllocateEmptyExternalArray(
+ ExternalArrayType array_type);
+
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 8f8c59ea8c..b7473879df 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -426,6 +426,8 @@ bool Range::MulAndCheckOverflow(Range* other) {
const char* HType::ToString() {
+ // Note: The c1visualizer syntax for locals allows only a sequence of the
+ // following characters: A-Za-z0-9_-|:
switch (type_) {
case kTagged: return "tagged";
case kTaggedPrimitive: return "primitive";
@@ -440,7 +442,7 @@ const char* HType::ToString() {
case kUninitialized: return "uninitialized";
}
UNREACHABLE();
- return "Unreachable code";
+ return "unreachable";
}
@@ -648,7 +650,6 @@ void HValue::SetOperandAt(int index, HValue* value) {
void HValue::DeleteAndReplaceWith(HValue* other) {
// We replace all uses first, so Delete can assert that there are none.
if (other != NULL) ReplaceAllUsesWith(other);
- ASSERT(HasNoUses());
Kill();
DeleteFromGraph();
}
@@ -694,16 +695,18 @@ void HValue::SetBlock(HBasicBlock* block) {
void HValue::PrintTypeTo(StringStream* stream) {
if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
- stream->Add(" type[%s]", type().ToString());
+ stream->Add(" type:%s", type().ToString());
}
void HValue::PrintRangeTo(StringStream* stream) {
if (range() == NULL || range()->IsMostGeneric()) return;
- stream->Add(" range[%d,%d,m0=%d]",
+ // Note: The c1visualizer syntax for locals allows only a sequence of the
+ // following characters: A-Za-z0-9_-|:
+ stream->Add(" range:%d_%d%s",
range()->lower(),
range()->upper(),
- static_cast<int>(range()->CanBeMinusZero()));
+ range()->CanBeMinusZero() ? "_m0" : "");
}
@@ -803,6 +806,14 @@ void HInstruction::PrintTo(StringStream* stream) {
}
+void HInstruction::PrintDataTo(StringStream *stream) {
+ for (int i = 0; i < OperandCount(); ++i) {
+ if (i > 0) stream->Add(" ");
+ OperandAt(i)->PrintNameTo(stream);
+ }
+}
+
+
void HInstruction::PrintMnemonicTo(StringStream* stream) {
stream->Add("%s ", Mnemonic());
}
@@ -1250,14 +1261,6 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
}
-void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- HControlInstruction::PrintDataTo(stream);
-}
-
-
void HReturn::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (pop ");
@@ -1441,6 +1444,16 @@ HValue* HMul::Canonicalize() {
}
+HValue* HMod::Canonicalize() {
+ return this;
+}
+
+
+HValue* HDiv::Canonicalize() {
+ return this;
+}
+
+
HValue* HChange::Canonicalize() {
return (from().Equals(to())) ? value() : this;
}
@@ -1678,6 +1691,7 @@ Range* HChange::InferRange(Zone* zone) {
!value()->CheckFlag(HInstruction::kUint32) &&
input_range != NULL && input_range->IsInSmiRange()) {
set_type(HType::Smi());
+ ClearGVNFlag(kChangesNewSpacePromotion);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
@@ -1768,20 +1782,22 @@ Range* HMul::InferRange(Zone* zone) {
Range* HDiv::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
+ Range* a = left()->range();
+ Range* b = right()->range();
Range* result = new(zone) Range();
- if (left()->range()->CanBeMinusZero()) {
+ if (a->CanBeMinusZero()) {
result->set_can_be_minus_zero(true);
}
- if (left()->range()->CanBeZero() && right()->range()->CanBeNegative()) {
+ if (a->CanBeZero() && b->CanBeNegative()) {
result->set_can_be_minus_zero(true);
}
- if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
- SetFlag(HValue::kCanOverflow);
+ if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ ClearFlag(HValue::kCanOverflow);
}
- if (!right()->range()->CanBeZero()) {
+ if (!b->CanBeZero()) {
ClearFlag(HValue::kCanBeDivByZero);
}
return result;
@@ -1794,16 +1810,17 @@ Range* HDiv::InferRange(Zone* zone) {
Range* HMod::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
+ Range* b = right()->range();
Range* result = new(zone) Range();
if (a->CanBeMinusZero() || a->CanBeNegative()) {
result->set_can_be_minus_zero(true);
}
- if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
- SetFlag(HValue::kCanOverflow);
+ if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ ClearFlag(HValue::kCanOverflow);
}
- if (!right()->range()->CanBeZero()) {
+ if (!b->CanBeZero()) {
ClearFlag(HValue::kCanBeDivByZero);
}
return result;
@@ -1893,14 +1910,15 @@ void HPhi::PrintTo(StringStream* stream) {
value->PrintNameTo(stream);
stream->Add(" ");
}
- stream->Add(" uses%d_%di_%dd_%dt",
+ stream->Add(" uses:%d_%di_%dd_%dt",
UseCount(),
int32_non_phi_uses() + int32_indirect_uses(),
double_non_phi_uses() + double_indirect_uses(),
tagged_non_phi_uses() + tagged_indirect_uses());
- stream->Add("%s%s]",
- is_live() ? "_live" : "",
- IsConvertibleToInteger() ? "" : "_ncti");
+ if (!IsConvertibleToInteger()) stream->Add("_ncti");
+ PrintRangeTo(stream);
+ PrintTypeTo(stream);
+ stream->Add("]");
}
@@ -2681,7 +2699,12 @@ bool HLoadKeyed::UsesMustHandleHole() const {
return false;
}
- if (hole_mode() == ALLOW_RETURN_HOLE) return true;
+ if (hole_mode() == ALLOW_RETURN_HOLE) {
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ return AllUsesCanTreatHoleAsNaN();
+ }
+ return true;
+ }
if (IsFastDoubleElementsKind(elements_kind())) {
return false;
@@ -2698,6 +2721,22 @@ bool HLoadKeyed::UsesMustHandleHole() const {
}
+bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
+ if (!IsFastDoubleElementsKind(elements_kind())) {
+ return false;
+ }
+
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (use->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
bool HLoadKeyed::RequiresHoleCheck() const {
if (IsFastPackedElementsKind(elements_kind())) {
return false;
@@ -3008,16 +3047,6 @@ void HAllocate::PrintDataTo(StringStream* stream) {
}
-HType HArrayLiteral::CalculateInferredType() {
- return HType::JSArray();
-}
-
-
-HType HObjectLiteral::CalculateInferredType() {
- return HType::JSObject();
-}
-
-
HType HRegExpLiteral::CalculateInferredType() {
return HType::JSObject();
}
@@ -3350,6 +3379,9 @@ HInstruction* HMod::New(
if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
int32_t dividend = c_left->Integer32Value();
int32_t divisor = c_right->Integer32Value();
+ if (dividend == kMinInt && divisor == -1) {
+ return H_CONSTANT_DOUBLE(-0.0);
+ }
if (divisor != 0) {
int32_t res = dividend % divisor;
if ((res == 0) && (dividend < 0)) {
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 3ea99d40a6..d06e3184f8 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -70,7 +70,6 @@ class LChunkBuilder;
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArgumentsObject) \
- V(ArrayLiteral) \
V(Bitwise) \
V(BitNot) \
V(BlockEntry) \
@@ -104,6 +103,7 @@ class LChunkBuilder;
V(CompareConstantEqAndBranch) \
V(Constant) \
V(Context) \
+ V(DebugBreak) \
V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
@@ -128,7 +128,6 @@ class LChunkBuilder;
V(InstanceSize) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
@@ -150,7 +149,6 @@ class LChunkBuilder;
V(Mod) \
V(Mul) \
V(NumericConstraint) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -796,6 +794,7 @@ class HValue: public ZoneObject {
kDeoptimizeOnUndefined,
kIsArguments,
kTruncatingToInt32,
+ // Set after an instruction is killed.
kIsDead,
// Instructions that are allowed to produce full range unsigned integer
// values are marked with kUint32 flag. If arithmetic shift or a load from
@@ -811,6 +810,8 @@ class HValue: public ZoneObject {
// has processed this instruction.
kIDefsProcessingDone,
kHasNoObservableSideEffects,
+ // Indicates the instruction is live during dead code elimination.
+ kIsLive,
kLastFlag = kIDefsProcessingDone
};
@@ -1073,8 +1074,9 @@ class HValue: public ZoneObject {
UNREACHABLE();
}
- bool IsDead() const {
- return HasNoUses() && !HasObservableSideEffects() && IsDeletable();
+ // Check if this instruction has some reason that prevents elimination.
+ bool CannotBeEliminated() const {
+ return HasObservableSideEffects() || !IsDeletable();
}
#ifdef DEBUG
@@ -1249,7 +1251,7 @@ class HInstruction: public HValue {
HInstruction* previous() const { return previous_; }
virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) { }
+ virtual void PrintDataTo(StringStream* stream);
bool IsLinked() const { return block() != NULL; }
void Unlink();
@@ -1462,6 +1464,17 @@ class HSoftDeoptimize: public HTemplateInstruction<0> {
};
+// Inserts an int3/stop break instruction for debugging purposes.
+class HDebugBreak: public HTemplateInstruction<0> {
+ public:
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
+};
+
+
class HDeoptimize: public HControlInstruction {
public:
HDeoptimize(int environment_length, Zone* zone)
@@ -2971,7 +2984,6 @@ class HPhi: public HValue {
: inputs_(2, zone),
merged_index_(merged_index),
phi_id_(-1),
- is_live_(false),
is_convertible_to_integer_(true) {
for (int i = 0; i < Representation::kNumRepresentations; i++) {
non_phi_uses_[i] = 0;
@@ -2996,7 +3008,7 @@ class HPhi: public HValue {
void AddInput(HValue* value);
bool HasRealUses();
- bool IsReceiver() { return merged_index_ == 0; }
+ bool IsReceiver() const { return merged_index_ == 0; }
int merged_index() const { return merged_index_; }
@@ -3031,8 +3043,6 @@ class HPhi: public HValue {
return indirect_uses_[Representation::kDouble];
}
int phi_id() { return phi_id_; }
- bool is_live() { return is_live_; }
- void set_is_live(bool b) { is_live_ = b; }
static HPhi* cast(HValue* value) {
ASSERT(value->IsPhi());
@@ -3064,6 +3074,9 @@ class HPhi: public HValue {
void SimplifyConstantInputs();
+ // TODO(titzer): we can't eliminate the receiver for generating backtraces
+ virtual bool IsDeletable() const { return !IsReceiver(); }
+
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -3082,7 +3095,6 @@ class HPhi: public HValue {
int non_phi_uses_[Representation::kNumRepresentations];
int indirect_uses_[Representation::kNumRepresentations];
int phi_id_;
- bool is_live_;
bool is_convertible_to_integer_;
};
@@ -3923,31 +3935,6 @@ class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
};
-class HIsNilAndBranch: public HUnaryControlInstruction {
- public:
- HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
- : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
-
- EqualityKind kind() const { return kind_; }
- NilValue nil() const { return nil_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual Representation observed_input_representation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
-
- private:
- EqualityKind kind_;
- NilValue nil_;
-};
-
-
class HIsObjectAndBranch: public HUnaryControlInstruction {
public:
explicit HIsObjectAndBranch(HValue* value)
@@ -4416,6 +4403,8 @@ class HMod: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* Canonicalize();
+
DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
@@ -4427,6 +4416,7 @@ class HMod: public HArithmeticBinaryOperation {
HMod(HValue* context, HValue* left, HValue* right)
: HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
+ SetFlag(kCanOverflow);
}
};
@@ -4450,6 +4440,8 @@ class HDiv: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* Canonicalize();
+
DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
@@ -5220,6 +5212,10 @@ class HLoadNamedField: public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
} else if (FLAG_track_double_fields && field_representation.IsDouble()) {
set_representation(field_representation);
+ } else if (FLAG_track_heap_object_fields &&
+ field_representation.IsHeapObject()) {
+ set_type(HType::NonPrimitive());
+ set_representation(Representation::Tagged());
} else {
set_representation(Representation::Tagged());
}
@@ -5415,7 +5411,7 @@ class HLoadKeyed
IsFastDoubleElementsKind(elements_kind));
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ELEMENTS) {
set_type(HType::Smi());
}
@@ -5490,6 +5486,7 @@ class HLoadKeyed
virtual void PrintDataTo(StringStream* stream);
bool UsesMustHandleHole() const;
+ bool AllUsesCanTreatHoleAsNaN() const;
bool RequiresHoleCheck() const;
virtual Range* InferRange(Zone* zone);
@@ -6081,106 +6078,6 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
};
-class HArrayLiteral: public HMaterializedLiteral<1> {
- public:
- HArrayLiteral(HValue* context,
- Handle<HeapObject> boilerplate_object,
- Handle<FixedArray> literals,
- int length,
- int literal_index,
- int depth,
- AllocationSiteMode mode)
- : HMaterializedLiteral<1>(literal_index, depth, mode),
- length_(length),
- boilerplate_object_(boilerplate_object),
- literals_(literals) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
-
- boilerplate_elements_kind_ = boilerplate_object_->IsJSObject()
- ? Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind()
- : TERMINAL_FAST_ELEMENTS_KIND;
-
- is_copy_on_write_ = boilerplate_object_->IsJSObject() &&
- (Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
- HEAP->fixed_cow_array_map());
- }
-
- HValue* context() { return OperandAt(0); }
- ElementsKind boilerplate_elements_kind() const {
- return boilerplate_elements_kind_;
- }
- Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
- Handle<FixedArray> literals() const { return literals_; }
- int length() const { return length_; }
- bool IsCopyOnWrite() const { return is_copy_on_write_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
-
- private:
- int length_;
- Handle<HeapObject> boilerplate_object_;
- Handle<FixedArray> literals_;
- ElementsKind boilerplate_elements_kind_;
- bool is_copy_on_write_;
-};
-
-
-class HObjectLiteral: public HMaterializedLiteral<1> {
- public:
- HObjectLiteral(HValue* context,
- Handle<FixedArray> constant_properties,
- Handle<FixedArray> literals,
- bool fast_elements,
- int literal_index,
- int depth,
- bool may_store_doubles,
- bool has_function)
- : HMaterializedLiteral<1>(literal_index, depth),
- constant_properties_(constant_properties),
- constant_properties_length_(constant_properties->length()),
- literals_(literals),
- fast_elements_(fast_elements),
- may_store_doubles_(may_store_doubles),
- has_function_(has_function) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> constant_properties() const {
- return constant_properties_;
- }
- int constant_properties_length() const {
- return constant_properties_length_;
- }
- Handle<FixedArray> literals() const { return literals_; }
- bool fast_elements() const { return fast_elements_; }
- bool may_store_doubles() const { return may_store_doubles_; }
- bool has_function() const { return has_function_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
-
- private:
- Handle<FixedArray> constant_properties_;
- int constant_properties_length_;
- Handle<FixedArray> literals_;
- bool fast_elements_ : 1;
- bool may_store_doubles_ : 1;
- bool has_function_ : 1;
-};
-
-
class HRegExpLiteral: public HMaterializedLiteral<1> {
public:
HRegExpLiteral(HValue* context,
@@ -6301,8 +6198,13 @@ class HToFastProperties: public HUnaryOperation {
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
// This instruction is not marked as having side effects, but
// changes the map of the input operand. Use it only when creating
- // object literals.
- ASSERT(value->IsObjectLiteral());
+ // object literals via a runtime call.
+ ASSERT(value->IsCallRuntime());
+#ifdef DEBUG
+ const Runtime::Function* function = HCallRuntime::cast(value)->function();
+ ASSERT(function->function_id == Runtime::kCreateObjectLiteral ||
+ function->function_id == Runtime::kCreateObjectLiteralShallow);
+#endif
set_representation(Representation::Tagged());
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 5c573feb19..097216ef83 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -104,7 +104,6 @@ void HBasicBlock::AddPhi(HPhi* phi) {
void HBasicBlock::RemovePhi(HPhi* phi) {
ASSERT(phi->block() == this);
ASSERT(phis_.Contains(phi));
- ASSERT(phi->HasNoUses() || !phi->is_live());
phi->Kill();
phis_.RemoveElement(phi);
phi->SetBlock(NULL);
@@ -723,7 +722,7 @@ HInstruction* HGraphBuilder::IfBuilder::IfCompare(
new(zone()) HCompareIDAndBranch(left, right, token);
compare->set_observed_input_representation(input_representation,
input_representation);
- compare->ChangeRepresentation(input_representation);
+ compare->AssumeRepresentation(input_representation);
AddCompare(compare);
return compare;
}
@@ -905,7 +904,7 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
phi_ = new(zone()) HPhi(env->values()->length(), zone());
header_block_->AddPhi(phi_);
phi_->AddInput(initial);
- phi_->ChangeRepresentation(Representation::Integer32());
+ phi_->AssumeRepresentation(Representation::Integer32());
env->Push(initial);
builder_->current_block()->GotoNoSimulate(header_block_);
@@ -921,7 +920,7 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
new(zone()) HCompareIDAndBranch(phi_, terminating, token);
compare->set_observed_input_representation(input_representation,
input_representation);
- compare->ChangeRepresentation(input_representation);
+ compare->AssumeRepresentation(input_representation);
compare->SetSuccessorAt(0, body_block_);
compare->SetSuccessorAt(1, exit_block_);
builder_->current_block()->Finish(compare);
@@ -935,7 +934,7 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
increment_ = HSub::New(zone(), context_, phi_, one);
}
increment_->ClearFlag(HValue::kCanOverflow);
- increment_->ChangeRepresentation(Representation::Integer32());
+ increment_->AssumeRepresentation(Representation::Integer32());
builder_->AddInstruction(increment_);
return increment_;
} else {
@@ -955,7 +954,7 @@ void HGraphBuilder::LoopBuilder::EndBody() {
increment_ = HSub::New(zone(), context_, phi_, one);
}
increment_->ClearFlag(HValue::kCanOverflow);
- increment_->ChangeRepresentation(Representation::Integer32());
+ increment_->AssumeRepresentation(Representation::Integer32());
builder_->AddInstruction(increment_);
}
@@ -1048,6 +1047,7 @@ HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
HValue* HGraphBuilder::BuildCheckNonSmi(HValue* obj) {
+ if (obj->type().IsHeapObject()) return obj;
HCheckNonSmi* check = new(zone()) HCheckNonSmi(obj);
AddInstruction(check);
return check;
@@ -1122,6 +1122,7 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(
HValue* load_dependency,
ElementsKind elements_kind,
bool is_store,
+ LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
Zone* zone = this->zone();
if (is_store) {
@@ -1129,8 +1130,10 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(
switch (elements_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone) HCheckSmi(val));
+ if (!val->type().IsSmi()) {
+ // Smi-only arrays need a smi check.
+ AddInstruction(new(zone) HCheckSmi(val));
+ }
// Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
@@ -1146,7 +1149,8 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(
return new(zone) HLoadKeyed(elements,
checked_key,
load_dependency,
- elements_kind);
+ elements_kind,
+ load_mode);
}
@@ -1188,7 +1192,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
if (is_js_array) {
HValue* new_length = AddInstruction(
HAdd::New(zone, context, length, graph_->GetConstant1()));
- new_length->ChangeRepresentation(Representation::Integer32());
+ new_length->AssumeRepresentation(Representation::Integer32());
new_length->ClearFlag(HValue::kCanOverflow);
Factory* factory = isolate()->factory();
@@ -1253,6 +1257,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
bool is_js_array,
ElementsKind elements_kind,
bool is_store,
+ LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode,
Representation checked_index_representation) {
ASSERT(!IsExternalArrayElementsKind(elements_kind) || !is_js_array);
@@ -1358,7 +1363,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
return AddInstruction(
BuildFastElementAccess(elements, checked_key, val, mapcheck,
- elements_kind, is_store, store_mode));
+ elements_kind, is_store, load_mode, store_mode));
}
@@ -1374,7 +1379,7 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
AddInstruction(elements_size_value);
HValue* mul = AddInstruction(
HMul::New(zone, context, capacity, elements_size_value));
- mul->ChangeRepresentation(Representation::Integer32());
+ mul->AssumeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);
HConstant* header_size =
@@ -1382,7 +1387,7 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
AddInstruction(header_size);
HValue* total_size = AddInstruction(
HAdd::New(zone, context, mul, header_size));
- total_size->ChangeRepresentation(Representation::Integer32());
+ total_size->AssumeRepresentation(Representation::Integer32());
total_size->ClearFlag(HValue::kCanOverflow);
HAllocate::Flags flags = HAllocate::DefaultFlags(kind);
@@ -1538,12 +1543,12 @@ HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* context,
HValue* half_old_capacity =
AddInstruction(HShr::New(zone, context, old_capacity,
graph_->GetConstant1()));
- half_old_capacity->ChangeRepresentation(Representation::Integer32());
+ half_old_capacity->AssumeRepresentation(Representation::Integer32());
half_old_capacity->ClearFlag(HValue::kCanOverflow);
HValue* new_capacity = AddInstruction(
HAdd::New(zone, context, half_old_capacity, old_capacity));
- new_capacity->ChangeRepresentation(Representation::Integer32());
+ new_capacity->AssumeRepresentation(Representation::Integer32());
new_capacity->ClearFlag(HValue::kCanOverflow);
HValue* min_growth =
@@ -1551,7 +1556,7 @@ HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* context,
new_capacity = AddInstruction(
HAdd::New(zone, context, new_capacity, min_growth));
- new_capacity->ChangeRepresentation(Representation::Integer32());
+ new_capacity->AssumeRepresentation(Representation::Integer32());
new_capacity->ClearFlag(HValue::kCanOverflow);
return new_capacity;
@@ -1807,27 +1812,27 @@ void HGraphBuilder::BuildCompareNil(
HIfContinuation* continuation) {
IfBuilder if_nil(this, position);
bool needs_or = false;
- if ((types & CompareNilICStub::kCompareAgainstNull) != 0) {
+ if (types.Contains(CompareNilICStub::NULL_TYPE)) {
if (needs_or) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
needs_or = true;
}
- if ((types & CompareNilICStub::kCompareAgainstUndefined) != 0) {
+ if (types.Contains(CompareNilICStub::UNDEFINED)) {
if (needs_or) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value,
graph()->GetConstantUndefined());
needs_or = true;
}
// Handle either undetectable or monomorphic, not both.
- ASSERT(((types & CompareNilICStub::kCompareAgainstUndetectable) == 0) ||
- ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) == 0));
- if ((types & CompareNilICStub::kCompareAgainstUndetectable) != 0) {
+ ASSERT(!types.Contains(CompareNilICStub::UNDETECTABLE) ||
+ !types.Contains(CompareNilICStub::MONOMORPHIC_MAP));
+ if (types.Contains(CompareNilICStub::UNDETECTABLE)) {
if (needs_or) if_nil.Or();
if_nil.If<HIsUndetectableAndBranch>(value);
} else {
if_nil.Then();
if_nil.Else();
- if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) {
+ if (!map.is_null() && types.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
BuildCheckNonSmi(value);
// For ICs, the map checked below is a sentinel map that gets replaced by
// the monomorphic map when the code is used as a template to generate a
@@ -1931,7 +1936,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
AddInstruction(elements_size_value);
HInstruction* mul = HMul::New(zone(), context, length_node,
elements_size_value);
- mul->ChangeRepresentation(Representation::Integer32());
+ mul->AssumeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);
AddInstruction(mul);
@@ -1939,7 +1944,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
Representation::Integer32());
AddInstruction(base);
HInstruction* total_size = HAdd::New(zone(), context, base, mul);
- total_size->ChangeRepresentation(Representation::Integer32());
+ total_size->AssumeRepresentation(Representation::Integer32());
total_size->ClearFlag(HValue::kCanOverflow);
AddInstruction(total_size);
return total_size;
@@ -2097,6 +2102,7 @@ HGraph::HGraph(CompilationInfo* info)
is_recursive_(false),
use_optimistic_licm_(false),
has_soft_deoptimize_(false),
+ depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
@@ -2135,7 +2141,6 @@ void HGraph::FinalizeUniqueValueIds() {
void HGraph::Canonicalize() {
- if (!FLAG_use_canonicalizing) return;
HPhase phase("H_Canonicalize", this);
for (int i = 0; i < blocks()->length(); ++i) {
HInstruction* instr = blocks()->at(i)->first();
@@ -2609,50 +2614,6 @@ void HGraph::EliminateRedundantPhis() {
}
-void HGraph::EliminateUnreachablePhis() {
- HPhase phase("H_Unreachable phi elimination", this);
-
- // Initialize worklist.
- ZoneList<HPhi*> phi_list(blocks_.length(), zone());
- ZoneList<HPhi*> worklist(blocks_.length(), zone());
- for (int i = 0; i < blocks_.length(); ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list.Add(phi, zone());
- // We can't eliminate phis in the receiver position in the environment
- // because in case of throwing an error we need this value to
- // construct a stack trace.
- if (phi->HasRealUses() || phi->IsReceiver()) {
- phi->set_is_live(true);
- worklist.Add(phi, zone());
- }
- }
- }
-
- // Iteratively mark live phis.
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- for (int i = 0; i < phi->OperandCount(); i++) {
- HValue* operand = phi->OperandAt(i);
- if (operand->IsPhi() && !HPhi::cast(operand)->is_live()) {
- HPhi::cast(operand)->set_is_live(true);
- worklist.Add(HPhi::cast(operand), zone());
- }
- }
- }
-
- // Remove unreachable phis.
- for (int i = 0; i < phi_list.length(); i++) {
- HPhi* phi = phi_list[i];
- if (!phi->is_live()) {
- HBasicBlock* block = phi->block();
- block->RemovePhi(phi);
- block->RecordDeletedPhi(phi->merged_index());
- }
- }
-}
-
-
bool HGraph::CheckArgumentsPhiUses() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
@@ -3138,6 +3099,7 @@ class HStackCheckEliminator BASE_EMBEDDED {
void HStackCheckEliminator::Process() {
+ HPhase phase("H_Stack check elimination", graph_);
// For each loop block walk the dominator tree from the backwards branch to
// the loop header. If a call instruction is encountered the backwards branch
// is dominated by a call and the stack check in the backwards branch can be
@@ -3900,6 +3862,7 @@ void HInferRepresentation::Analyze() {
void HGraph::MergeRemovableSimulates() {
+ HPhase phase("H_Merge removable simulates", this);
ZoneList<HSimulate*> mergelist(2, zone());
for (int i = 0; i < blocks()->length(); ++i) {
HBasicBlock* block = blocks()->at(i);
@@ -4403,9 +4366,8 @@ void Uint32Analysis::UnmarkUnsafePhis() {
void HGraph::ComputeSafeUint32Operations() {
- if (!FLAG_opt_safe_uint32_operations || uint32_instructions_ == NULL) {
- return;
- }
+ HPhase phase("H_Compute safe UInt32 operations", this);
+ if (uint32_instructions_ == NULL) return;
Uint32Analysis analysis(zone());
for (int i = 0; i < uint32_instructions_->length(); ++i) {
@@ -4424,6 +4386,7 @@ void HGraph::ComputeSafeUint32Operations() {
void HGraph::ComputeMinusZeroChecks() {
+ HPhase phase("H_Compute minus zero checks", this);
BitVector visited(GetMaximumValueID(), zone());
for (int i = 0; i < blocks_.length(); ++i) {
for (HInstruction* current = blocks_[i]->first();
@@ -4868,19 +4831,17 @@ bool HOptimizedGraphBuilder::BuildGraph() {
}
+// Perform common subexpression elimination and loop-invariant code motion.
void HGraph::GlobalValueNumbering() {
- // Perform common subexpression elimination and loop-invariant code motion.
- if (FLAG_use_gvn) {
- HPhase phase("H_Global value numbering", this);
- HGlobalValueNumberer gvn(this, info());
- bool removed_side_effects = gvn.Analyze();
- // Trigger a second analysis pass to further eliminate duplicate values that
- // could only be discovered by removing side-effect-generating instructions
- // during the first pass.
- if (FLAG_smi_only_arrays && removed_side_effects) {
- removed_side_effects = gvn.Analyze();
- ASSERT(!removed_side_effects);
- }
+ HPhase phase("H_Global value numbering", this);
+ HGlobalValueNumberer gvn(this, info());
+ bool removed_side_effects = gvn.Analyze();
+ // Trigger a second analysis pass to further eliminate duplicate values that
+ // could only be discovered by removing side-effect-generating instructions
+ // during the first pass.
+ if (FLAG_smi_only_arrays && removed_side_effects) {
+ removed_side_effects = gvn.Analyze();
+ ASSERT(!removed_side_effects);
}
}
@@ -4913,7 +4874,11 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
"Unsupported phi use of arguments"));
return false;
}
- if (FLAG_eliminate_dead_phis) EliminateUnreachablePhis();
+
+ // Remove dead code and phis
+ if (FLAG_dead_code_elimination) {
+ DeadCodeElimination("H_Eliminate early dead code");
+ }
CollectPhis();
if (has_osr_loop_entry()) {
@@ -4940,11 +4905,11 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
// Must be performed before canonicalization to ensure that Canonicalize
// will not remove semantically meaningful ToInt32 operations e.g. BIT_OR with
// zero.
- ComputeSafeUint32Operations();
+ if (FLAG_opt_safe_uint32_operations) ComputeSafeUint32Operations();
- Canonicalize();
+ if (FLAG_use_canonicalizing) Canonicalize();
- GlobalValueNumbering();
+ if (FLAG_use_gvn) GlobalValueNumbering();
if (FLAG_use_range) {
HRangeAnalysis rangeAnalysis(this);
@@ -4961,7 +4926,9 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
EliminateRedundantBoundsChecks();
}
if (FLAG_array_index_dehoisting) DehoistSimpleArrayIndexComputations();
- if (FLAG_dead_code_elimination) DeadCodeElimination();
+ if (FLAG_dead_code_elimination) {
+ DeadCodeElimination("H_Eliminate late dead code");
+ }
RestoreActualValues();
@@ -5396,7 +5363,9 @@ static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
} else if (sub->right()->IsConstant()) {
subexpression = sub->left();
constant = HConstant::cast(sub->right());
- } return;
+ } else {
+ return;
+ }
} else {
return;
}
@@ -5438,35 +5407,98 @@ void HGraph::DehoistSimpleArrayIndexComputations() {
}
-void HGraph::DeadCodeElimination() {
- HPhase phase("H_Dead code elimination", this);
- ZoneList<HInstruction*> worklist(blocks_.length(), zone());
+void HGraph::DeadCodeElimination(const char* phase_name) {
+ HPhase phase(phase_name, this);
+ MarkLiveInstructions();
+ RemoveDeadInstructions();
+}
+
+
+void HGraph::MarkLiveInstructions() {
+ ZoneList<HValue*> worklist(blocks_.length(), zone());
+
+ // Mark initial root instructions for dead code elimination.
for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstruction* instr = blocks()->at(i)->first();
+ HBasicBlock* block = blocks()->at(i);
+ for (HInstruction* instr = block->first();
instr != NULL;
instr = instr->next()) {
- if (instr->IsDead()) worklist.Add(instr, zone());
+ if (instr->CannotBeEliminated()) MarkLive(NULL, instr, &worklist);
+ }
+ for (int j = 0; j < block->phis()->length(); j++) {
+ HPhi* phi = block->phis()->at(j);
+ if (phi->CannotBeEliminated()) MarkLive(NULL, phi, &worklist);
}
}
+ // Transitively mark all inputs of live instructions live.
while (!worklist.is_empty()) {
- HInstruction* instr = worklist.RemoveLast();
- // This happens when an instruction is used multiple times as operand. That
- // in turn could happen through GVN.
- if (!instr->IsLinked()) continue;
+ HValue* instr = worklist.RemoveLast();
+ for (int i = 0; i < instr->OperandCount(); ++i) {
+ MarkLive(instr, instr->OperandAt(i), &worklist);
+ }
+ }
+}
+
+
+void HGraph::MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist) {
+ if (!instr->CheckFlag(HValue::kIsLive)) {
+ instr->SetFlag(HValue::kIsLive);
+ worklist->Add(instr, zone());
+
if (FLAG_trace_dead_code_elimination) {
HeapStringAllocator allocator;
StringStream stream(&allocator);
- instr->PrintNameTo(&stream);
- stream.Add(" = ");
+ ALLOW_HANDLE_DEREF(isolate(), "debug mode printing");
+ if (ref != NULL) {
+ ref->PrintTo(&stream);
+ } else {
+ stream.Add("root ");
+ }
+ stream.Add(" -> ");
instr->PrintTo(&stream);
- PrintF("[removing dead instruction %s]\n", *stream.ToCString());
+ PrintF("[MarkLive %s]\n", *stream.ToCString());
}
- instr->DeleteAndReplaceWith(NULL);
- for (int i = 0; i < instr->OperandCount(); ++i) {
- HValue* operand = instr->OperandAt(i);
- if (operand->IsDead()) worklist.Add(HInstruction::cast(operand), zone());
+ }
+}
+
+
+void HGraph::RemoveDeadInstructions() {
+ ZoneList<HPhi*> dead_phis(blocks_.length(), zone());
+
+ // Remove any instruction not marked kIsLive.
+ for (int i = 0; i < blocks()->length(); ++i) {
+ HBasicBlock* block = blocks()->at(i);
+ for (HInstruction* instr = block->first();
+ instr != NULL;
+ instr = instr->next()) {
+ if (!instr->CheckFlag(HValue::kIsLive)) {
+ // Instruction has not been marked live; assume it is dead and remove.
+ // TODO(titzer): we don't remove constants because some special ones
+ // might be used by later phases and are assumed to be in the graph
+ if (!instr->IsConstant()) instr->DeleteAndReplaceWith(NULL);
+ } else {
+ // Clear the liveness flag to leave the graph clean for the next DCE.
+ instr->ClearFlag(HValue::kIsLive);
+ }
}
+ // Collect phis that are dead and remove them in the next pass.
+ for (int j = 0; j < block->phis()->length(); j++) {
+ HPhi* phi = block->phis()->at(j);
+ if (!phi->CheckFlag(HValue::kIsLive)) {
+ dead_phis.Add(phi, zone());
+ } else {
+ phi->ClearFlag(HValue::kIsLive);
+ }
+ }
+ }
+
+ // Process phis separately to avoid simultaneously mutating the phi list.
+ while (!dead_phis.is_empty()) {
+ HPhi* phi = dead_phis.RemoveLast();
+ HBasicBlock* block = phi->block();
+ phi->DeleteAndReplaceWith(NULL);
+ block->RecordDeletedPhi(phi->merged_index());
}
}
@@ -6756,16 +6788,32 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
pointer_size,
DONT_TRACK_ALLOCATION_SITE);
} else {
+ NoObservableSideEffectsScope no_effects(this);
Handle<FixedArray> closure_literals(closure->literals(), isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ int literal_index = expr->literal_index();
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags;
+
+ AddInstruction(new(zone()) HPushArgument(AddInstruction(
+ new(zone()) HConstant(closure_literals, Representation::Tagged()))));
+ AddInstruction(new(zone()) HPushArgument(AddInstruction(
+ new(zone()) HConstant(literal_index, Representation::Tagged()))));
+ AddInstruction(new(zone()) HPushArgument(AddInstruction(
+ new(zone()) HConstant(constant_properties, Representation::Tagged()))));
+ AddInstruction(new(zone()) HPushArgument(AddInstruction(
+ new(zone()) HConstant(flags, Representation::Tagged()))));
+
+ Runtime::FunctionId function_id =
+ (expr->depth() > 1 || expr->may_store_doubles())
+ ? Runtime::kCreateObjectLiteral : Runtime::kCreateObjectLiteralShallow;
literal = AddInstruction(
- new(zone()) HObjectLiteral(context,
- expr->constant_properties(),
- closure_literals,
- expr->fast_elements(),
- expr->literal_index(),
- expr->depth(),
- expr->may_store_doubles(),
- expr->has_function()));
+ new(zone()) HCallRuntime(context,
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(function_id),
+ 4));
}
// The object is expected in the bailout environment during computation
@@ -6899,14 +6947,46 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
pointer_size,
mode);
} else {
+ NoObservableSideEffectsScope no_effects(this);
+ // Boilerplate already exists and constant elements are never accessed,
+ // pass an empty fixed array to the runtime function instead.
+ Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
+ int literal_index = expr->literal_index();
+
+ // TODO(mstarzinger): The following check and deopt is actually obsolete
+ // but test cases for the tick processor fails because profile differs.
+
+ // Deopt if the array literal boilerplate ElementsKind is of a type
+ // different than the expected one. The check isn't necessary if the
+ // boilerplate has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
+ IfBuilder builder(this);
+ HValue* boilerplate = AddInstruction(new(zone())
+ HConstant(original_boilerplate_object, Representation::Tagged()));
+ HValue* elements_kind = AddInstruction(new(zone())
+ HElementsKind(boilerplate));
+ HValue* expected_kind = AddInstruction(new(zone())
+ HConstant(boilerplate_elements_kind, Representation::Integer32()));
+ builder.IfCompare(elements_kind, expected_kind, Token::EQ);
+ builder.Then();
+ builder.ElseDeopt();
+ }
+
+ AddInstruction(new(zone()) HPushArgument(AddInstruction(
+ new(zone()) HConstant(literals, Representation::Tagged()))));
+ AddInstruction(new(zone()) HPushArgument(AddInstruction(
+ new(zone()) HConstant(literal_index, Representation::Tagged()))));
+ AddInstruction(new(zone()) HPushArgument(AddInstruction(
+ new(zone()) HConstant(constants, Representation::Tagged()))));
+
+ Runtime::FunctionId function_id = (expr->depth() > 1)
+ ? Runtime::kCreateArrayLiteral : Runtime::kCreateArrayLiteralShallow;
literal = AddInstruction(
- new(zone()) HArrayLiteral(context,
- original_boilerplate_object,
- literals,
- length,
- expr->literal_index(),
- expr->depth(),
- mode));
+ new(zone()) HCallRuntime(context,
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(function_id),
+ 3));
}
// The array is expected in the bailout environment during computation
@@ -6934,9 +7014,11 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
switch (boilerplate_elements_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone()) HCheckSmi(value));
- // Fall through.
+ if (!value->type().IsSmi()) {
+ // Smi-only arrays need a smi check.
+ AddInstruction(new(zone()) HCheckSmi(value));
+ // Fall through.
+ }
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -7011,14 +7093,14 @@ static Representation ComputeLoadStoreRepresentation(Handle<Map> type,
void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
+ BuildCheckNonSmi(object);
AddInstruction(HCheckMaps::New(object, map, zone()));
}
void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
+ BuildCheckNonSmi(object);
AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
}
@@ -7163,7 +7245,7 @@ bool HOptimizedGraphBuilder::HandlePolymorphicArrayLengthLoad(
if (types->at(i)->instance_type() != JS_ARRAY_TYPE) return false;
}
- AddInstruction(new(zone()) HCheckNonSmi(object));
+ BuildCheckNonSmi(object);
HInstruction* typecheck =
AddInstruction(HCheckMaps::New(object, types, zone()));
@@ -7183,7 +7265,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
if (HandlePolymorphicArrayLengthLoad(expr, object, types, name))
return;
- AddInstruction(new(zone()) HCheckNonSmi(object));
+ BuildCheckNonSmi(object);
// Use monomorphic load if property lookup results in the same field index
// for all maps. Requires special map check on the set of all handled maps.
@@ -7261,7 +7343,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, true)) {
if (count == 0) {
- AddInstruction(new(zone()) HCheckNonSmi(object)); // Only needed once.
+ BuildCheckNonSmi(object);
join = graph()->CreateBasicBlock();
}
++count;
@@ -7933,10 +8015,23 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
if (dependency) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
}
+
+ // Loads from a "stock" fast holey double arrays can elide the hole check.
+ LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
+ if (*map == isolate()->get_initial_js_array_map(FAST_HOLEY_DOUBLE_ELEMENTS) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
+ Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
+ AddInstruction(
+ new(zone()) HCheckPrototypeMaps(prototype, object_prototype, zone()));
+ load_mode = ALLOW_RETURN_HOLE;
+ graph()->MarkDependsOnEmptyArrayProtoElements();
+ }
+
return BuildUncheckedMonomorphicElementAccess(
object, key, val,
mapcheck, map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), is_store, store_mode);
+ map->elements_kind(), is_store, load_mode, store_mode);
}
@@ -7991,7 +8086,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
object, key, val, check_maps,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
most_general_consolidated_map->elements_kind(),
- false, STANDARD_STORE);
+ false, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
@@ -8007,7 +8102,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
*has_side_effects = false;
- AddInstruction(new(zone()) HCheckNonSmi(object));
+ BuildCheckNonSmi(object);
SmallMapList* maps = prop->GetReceiverTypes();
bool todo_external_array = false;
@@ -8164,7 +8259,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store, STANDARD_STORE));
+ elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
if (!is_store) {
Push(access);
}
@@ -8182,7 +8277,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store, STANDARD_STORE));
+ elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE));
} else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -8231,7 +8326,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
: BuildLoadKeyedGeneric(obj, key);
AddInstruction(instr);
} else {
- AddInstruction(new(zone()) HCheckNonSmi(obj));
+ BuildCheckNonSmi(obj);
instr = BuildMonomorphicElementAccess(
obj, key, val, NULL, map, is_store, expr->GetStoreMode());
}
@@ -8366,7 +8461,7 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
HInstruction* instr = NULL;
if (expr->IsStringLength()) {
HValue* string = Pop();
- AddInstruction(new(zone()) HCheckNonSmi(string));
+ BuildCheckNonSmi(string);
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
instr = HStringLength::New(zone(), string);
} else if (expr->IsStringAccess()) {
@@ -8381,7 +8476,7 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
- AddInstruction(new(zone()) HCheckNonSmi(function));
+ BuildCheckNonSmi(function);
instr = new(zone()) HLoadFunctionPrototype(function);
} else if (expr->key()->IsPropertyName()) {
@@ -8555,7 +8650,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
empty_smi_block->Goto(number_block);
set_current_block(not_smi_block);
} else {
- AddInstruction(new(zone()) HCheckNonSmi(receiver));
+ BuildCheckNonSmi(receiver);
}
}
HBasicBlock* if_true = graph()->CreateBasicBlock();
@@ -10182,7 +10277,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
return new(zone()) HConstant(s->Get(i), Representation::Integer32());
}
}
- AddInstruction(new(zone()) HCheckNonSmi(string));
+ BuildCheckNonSmi(string);
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
HInstruction* length = HStringLength::New(zone(), string);
AddInstruction(length);
@@ -10236,7 +10331,7 @@ bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
}
-bool CanBeZero(HValue *right) {
+bool CanBeZero(HValue* right) {
if (right->IsConstant()) {
HConstant* right_const = HConstant::cast(right);
if (right_const->HasInteger32Value() &&
@@ -10268,9 +10363,9 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
switch (expr->op()) {
case Token::ADD:
if (left_info.IsString() && right_info.IsString()) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
+ BuildCheckNonSmi(left);
AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
+ BuildCheckNonSmi(right);
AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
instr = HStringAdd::New(zone(), context, left, right);
} else {
@@ -10404,7 +10499,7 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
if ((is_logical_and && left_constant->BooleanValue()) ||
(!is_logical_and && !left_constant->BooleanValue())) {
Drop(1); // left_value.
- CHECK_BAILOUT(VisitForValue(expr->right()));
+ CHECK_ALIVE(VisitForValue(expr->right()));
}
return ast_context()->ReturnValue(Pop());
}
@@ -10676,9 +10771,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
- AddInstruction(new(zone()) HCheckNonSmi(left));
+ BuildCheckNonSmi(left);
AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
+ BuildCheckNonSmi(right);
AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
@@ -10691,9 +10786,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
} else if (overall_type_info.IsInternalizedString() &&
Token::IsEqualityOp(op)) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
+ BuildCheckNonSmi(left);
AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
- AddInstruction(new(zone()) HCheckNonSmi(right));
+ BuildCheckNonSmi(right);
AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
@@ -10730,15 +10825,13 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
TypeFeedbackId id = expr->CompareOperationFeedbackId();
CompareNilICStub::Types types;
if (kind == kStrictEquality) {
- if (nil == kNullValue) {
- types = CompareNilICStub::kCompareAgainstNull;
- } else {
- types = CompareNilICStub::kCompareAgainstUndefined;
- }
+ types.Add((nil == kNullValue) ? CompareNilICStub::NULL_TYPE :
+ CompareNilICStub::UNDEFINED);
} else {
- types = static_cast<CompareNilICStub::Types>(
- oracle()->CompareNilTypes(id));
- if (types == 0) types = CompareNilICStub::kFullCompare;
+ types = CompareNilICStub::Types(oracle()->CompareNilTypes(id));
+ if (types.IsEmpty()) {
+ types = CompareNilICStub::Types::FullCompare();
+ }
}
Handle<Map> map_handle(oracle()->CompareNilMonomorphicReceiverType(id));
BuildCompareNil(value, kind, types, map_handle,
@@ -10843,9 +10936,11 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
+ int copied_fields = 0;
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
+ copied_fields++;
int index = descriptors->GetFieldIndex(i);
int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
Handle<Name> name(descriptors->GetKey(i));
@@ -10884,6 +10979,16 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy(
}
}
+ int inobject_properties = boilerplate_object->map()->inobject_properties();
+ HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
+ factory->one_pointer_filler_map(), Representation::Tagged()));
+ for (int i = copied_fields; i < inobject_properties; i++) {
+ AddInstruction(new(zone) HStoreNamedField(
+ object_properties, factory->unknown_field_string(), value_instruction,
+ true, Representation::Tagged(),
+ boilerplate_object->GetInObjectPropertyOffset(i)));
+ }
+
// Build Allocation Site Info if desired
if (create_allocation_site_info) {
BuildCreateAllocationSiteInfo(target, JSArray::kSize, original_boilerplate);
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index a95424a1c9..b053fc71c5 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -277,17 +277,15 @@ class HGraph: public ZoneObject {
void GlobalValueNumbering();
bool ProcessArgumentsObject();
void EliminateRedundantPhis();
- void EliminateUnreachablePhis();
void Canonicalize();
void OrderBlocks();
void AssignDominators();
void SetupInformativeDefinitions();
void EliminateRedundantBoundsChecks();
void DehoistSimpleArrayIndexComputations();
- void DeadCodeElimination();
void RestoreActualValues();
+ void DeadCodeElimination(const char *phase_name);
void PropagateDeoptimizingMark();
- void EliminateUnusedInstructions();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
@@ -389,6 +387,14 @@ class HGraph: public ZoneObject {
return is_recursive_;
}
+ void MarkDependsOnEmptyArrayProtoElements() {
+ depends_on_empty_array_proto_elements_ = true;
+ }
+
+ bool depends_on_empty_array_proto_elements() {
+ return depends_on_empty_array_proto_elements_;
+ }
+
void RecordUint32Instruction(HInstruction* instr) {
if (uint32_instructions_ == NULL) {
uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
@@ -402,6 +408,9 @@ class HGraph: public ZoneObject {
HConstant* GetConstantSmi(SetOncePointer<HConstant>* pointer,
int32_t integer_value);
+ void MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist);
+ void MarkLiveInstructions();
+ void RemoveDeadInstructions();
void MarkAsDeoptimizingRecursively(HBasicBlock* block);
void NullifyUnreachableInstructions();
void InsertTypeConversions(HInstruction* instr);
@@ -449,6 +458,7 @@ class HGraph: public ZoneObject {
bool is_recursive_;
bool use_optimistic_licm_;
bool has_soft_deoptimize_;
+ bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
@@ -1002,6 +1012,7 @@ class HGraphBuilder {
HValue* dependency,
ElementsKind elements_kind,
bool is_store,
+ LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
HValue* BuildCheckForCapacityGrow(HValue* object,
@@ -1024,6 +1035,7 @@ class HGraphBuilder {
bool is_js_array,
ElementsKind elements_kind,
bool is_store,
+ LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode,
Representation checked_index_representation = Representation::None());
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index c6e10f4b41..2b45d7654d 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -657,6 +657,11 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 2897234977..507aeb6772 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -292,8 +292,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Map must never be empty, so check the first elements.
Label install_optimized;
// Speculatively move code object into edx.
- __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
+ __ mov(edx, FieldOperand(ebx, SharedFunctionInfo::kFirstCodeSlot));
+ __ cmp(ecx, FieldOperand(ebx, SharedFunctionInfo::kFirstContextSlot));
__ j(equal, &install_optimized);
// Iterate through the rest of map backwards. edx holds an index as a Smi.
@@ -302,10 +302,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ bind(&loop);
// Do not double check first entry.
- __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
__ j(equal, &restore);
- __ sub(edx, Immediate(Smi::FromInt(
- SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ sub(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
__ j(not_equal, &loop, Label::kNear);
// Hit: fetch the optimized code.
@@ -1227,6 +1226,14 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
}
+static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
+ __ pop(ecx);
+ __ pop(eax);
+ __ pop(edx);
+ __ push(ecx);
+}
+
+
static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
@@ -1633,7 +1640,9 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
BinaryOpStub_GenerateSmiCode(
masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
- __ bind(&call_runtime);
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
switch (op_) {
case Token::ADD:
case Token::SUB:
@@ -1653,6 +1662,34 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
default:
UNREACHABLE();
}
+
+ __ bind(&call_runtime);
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ BinaryOpStub_GenerateRegisterArgsPop(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ GenerateCallRuntime(masm);
+ }
+ __ ret(0);
}
@@ -1677,7 +1714,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime, Label::kNear);
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub string_add_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
@@ -1869,7 +1907,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::SUB:
case Token::MUL:
case Token::DIV:
- GenerateRegisterArgsPush(masm);
break;
case Token::MOD:
return; // Handled above.
@@ -1879,11 +1916,19 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR:
+ BinaryOpStub_GenerateRegisterArgsPop(masm);
break;
default:
UNREACHABLE();
}
- GenerateCallRuntime(masm);
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ GenerateCallRuntime(masm);
+ }
+ __ ret(0);
}
@@ -2086,7 +2131,6 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD:
- GenerateRegisterArgsPush(masm);
break;
case Token::BIT_OR:
case Token::BIT_AND:
@@ -2094,11 +2138,19 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR:
+ BinaryOpStub_GenerateRegisterArgsPop(masm);
break;
default:
UNREACHABLE();
}
- GenerateCallRuntime(masm);
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ GenerateCallRuntime(masm);
+ }
+ __ ret(0);
}
@@ -2264,7 +2316,6 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
case Token::SUB:
case Token::MUL:
case Token::DIV:
- GenerateRegisterArgsPush(masm);
break;
case Token::MOD:
case Token::BIT_OR:
@@ -2273,11 +2324,19 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR:
+ BinaryOpStub_GenerateRegisterArgsPop(masm);
break;
default:
UNREACHABLE();
}
- GenerateCallRuntime(masm);
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ GenerateCallRuntime(masm);
+ }
+ __ ret(0);
}
@@ -2294,7 +2353,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ StringAddStub string_add_left_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
@@ -2304,7 +2364,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime, Label::kNear);
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ StringAddStub string_add_right_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
@@ -5714,7 +5775,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
+ if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
__ JumpIfSmi(eax, &call_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &call_runtime);
@@ -6022,15 +6083,52 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Drop(2);
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ if ((flags_ & ERECT_FRAME) != 0) {
+ GenerateRegisterArgsPop(masm, ecx);
+ // Build a frame
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ __ CallRuntime(Runtime::kStringAdd, 2);
+ }
+ __ ret(0);
+ } else {
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ }
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ if ((flags_ & ERECT_FRAME) != 0) {
+ GenerateRegisterArgsPop(masm, ecx);
+ // Build a frame
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
+ }
+ __ ret(0);
+ } else {
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
}
}
+void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(eax);
+ __ push(edx);
+}
+
+
+void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
+ Register temp) {
+ __ pop(temp);
+ __ pop(edx);
+ __ pop(eax);
+ __ push(temp);
+}
+
+
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index fbf1a68e66..6dc63bdd49 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -218,13 +218,14 @@ class StringHelper : public AllStatic {
};
-// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_ADD_FLAGS = 1 << 0,
// Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
// Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
+ // Stub needs a frame before calling the runtime
+ ERECT_FRAME = 1 << 3,
// Omit both string checks in stub.
NO_STRING_CHECK_IN_STUB =
NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
@@ -249,6 +250,9 @@ class StringAddStub: public PlatformCodeStub {
Register scratch3,
Label* slow);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
+
const StringAddFlags flags_;
};
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index f8aff48d13..9f3c4e97f6 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -123,14 +123,13 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
+ // The optimized code is going to be patched, so we cannot use it any more.
+ function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
+
// We will overwrite the code's relocation info in-place. Relocation info
// is written backward. The relocation info is the payload of a byte
// array. Later on we will slide this to the start of the byte array and
@@ -363,7 +362,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
ast_id,
input_frame_size,
@@ -477,198 +476,12 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
-
- unsigned alignment_state_offset =
- input_offset - parameter_count * kPointerSize -
- StandardFrameConstants::kFixedFrameSize -
- kPointerSize;
- ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
- JavaScriptFrameConstants::kLocal0Offset);
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
- has_alignment_padding_ =
- (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
- // 2 = context and function in the frame.
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost ||
- (input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
- fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
- ASSERT(!is_bottommost || !has_alignment_padding_ ||
- (fp_value & kPointerSize) != 0);
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<uint32_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(esi.code(), value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -711,6 +524,20 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned alignment_state_offset =
+ input_frame_size - parameter_count * kPointerSize -
+ StandardFrameConstants::kFixedFrameSize -
+ kPointerSize;
+ ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+ JavaScriptFrameConstants::kLocal0Offset);
+ int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
+ return (alignment_state == kAlignmentPaddingPushed);
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -741,7 +568,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the address of the location in the code object if possible
// and compute the fp-to-sp delta in register edx.
- if (type() == EAGER) {
+ if (type() == EAGER || type() == SOFT) {
__ Set(ecx, Immediate(0));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
} else {
@@ -794,7 +621,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ fnclex();
// Remove the bailout id and the double registers from the stack.
- if (type() == EAGER) {
+ if (type() == EAGER || type() == SOFT) {
__ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
} else {
__ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 4932fa387e..ea19e9f6ba 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -43,6 +43,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) {
}
+Register JavaScriptFrame::fp_register() { return ebp; }
+Register JavaScriptFrame::context_register() { return esi; }
+
+
Register StubFailureTrampolineFrame::fp_register() { return ebp; }
Register StubFailureTrampolineFrame::context_register() { return esi; }
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 5a780197c3..1bc72ec314 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -161,6 +161,7 @@ void FullCodeGenerator::Generate() {
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS Function.
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -410,6 +411,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ mov(esp, ebp);
+ int no_frame_start = masm_->pc_offset();
__ pop(ebp);
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
@@ -420,6 +422,7 @@ void FullCodeGenerator::EmitReturnSequence() {
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -1922,8 +1925,95 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING:
- UNIMPLEMENTED();
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
+ // Initial send value is undefined.
+ __ mov(eax, isolate()->factory()->undefined_value());
+ __ jmp(&l_send);
+
+ // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // iter
+ __ push(edx); // iter
+ __ push(eax); // exception
+ __ mov(ecx, isolate()->factory()->throw_string()); // "throw"
+ Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(throw_ic); // iter.throw in eax
+ __ jmp(&l_call);
+
+ // try { received = yield result.value }
+ __ bind(&l_try);
+ __ pop(eax); // result.value
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(eax); // result.value
+ __ push(Operand(esp, (0 + 1) * kPointerSize + handler_size)); // g
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CompareRoot(eax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &l_resume);
+ EmitReturnIteratorResult(false);
+ __ bind(&l_resume); // received in eax
+ __ PopTryHandler();
+
+ // receiver = iter; f = iter.send; arg = received;
+ __ bind(&l_send);
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // iter
+ __ push(edx); // iter
+ __ push(eax); // received
+ __ mov(ecx, isolate()->factory()->send_string()); // "send"
+ Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(send_ic); // iter.send in eax
+
+ // result = f.call(receiver, arg);
+ __ bind(&l_call);
+ Label l_call_runtime;
+ __ JumpIfSmi(eax, &l_call_runtime);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &l_call_runtime);
+ __ mov(edi, eax);
+ ParameterCount count(1);
+ __ InvokeFunction(edi, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ jmp(&l_loop);
+ __ bind(&l_call_runtime);
+ __ push(eax);
+ __ CallRuntime(Runtime::kCall, 3);
+
+ // val = result.value; if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ // result.value
+ __ push(eax); // save result
+ __ mov(edx, eax); // result
+ __ mov(ecx, isolate()->factory()->value_string()); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in eax
+ __ pop(ebx); // result
+ __ push(eax); // result.value
+ __ mov(edx, ebx); // result
+ __ mov(ecx, isolate()->factory()->done_string()); // "done"
+ Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(done_ic); // result.done in eax
+ ToBooleanStub stub(eax);
+ __ push(eax);
+ __ CallStub(&stub);
+ __ test(eax, eax);
+ __ j(zero, &l_try);
+
+ // result.value
+ __ pop(eax); // result.value
+ context()->DropAndPlug(2, eax); // drop iter and g
+ break;
+ }
}
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 1d9e9421b1..b6244af412 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -117,6 +117,12 @@ void LCodeGen::FinishCode(Handle<Code> code) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
}
+ if (graph()->depends_on_empty_array_proto_elements()) {
+ isolate()->initial_object_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ isolate()->initial_array_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ }
}
@@ -204,6 +210,7 @@ bool LCodeGen::GeneratePrologue() {
frame_is_built_ = true;
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
+ info()->AddNoFrameRange(0, masm_->pc_offset());
__ push(esi); // Callee's context.
if (info()->IsStub()) {
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
@@ -379,9 +386,7 @@ bool LCodeGen::GenerateJumpTable() {
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
Address entry = jump_table_[i].address;
- bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
@@ -390,7 +395,7 @@ bool LCodeGen::GenerateJumpTable() {
}
if (jump_table_[i].needs_frame) {
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- if (is_lazy_deopt) {
+ if (type == Deoptimizer::LAZY) {
if (needs_frame_is_call.is_bound()) {
__ jmp(&needs_frame_is_call);
} else {
@@ -435,7 +440,7 @@ bool LCodeGen::GenerateJumpTable() {
}
}
} else {
- if (is_lazy_deopt) {
+ if (type == Deoptimizer::LAZY) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
@@ -887,16 +892,15 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
// It's an error to deoptimize with the x87 fp stack in use.
ASSERT(x87_stack_depth_ == 0);
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -942,9 +946,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
}
ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
if (cc == no_condition && frame_is_built_) {
- if (needs_lazy_deopt) {
+ if (bailout_type == Deoptimizer::LAZY) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
@@ -955,8 +958,10 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
if (jump_table_.is_empty() ||
jump_table_.last().address != entry ||
jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ jump_table_.last().bailout_type != bailout_type) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
@@ -968,6 +973,21 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
}
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(cc, environment, bailout_type);
+}
+
+
+void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
+ ASSERT(!info()->IsStub());
+ DeoptimizeIf(no_condition, environment, Deoptimizer::SOFT);
+}
+
+
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -2298,46 +2318,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Handle<Object> nil_value = instr->nil() == kNullValue ?
- factory()->null_value() :
- factory()->undefined_value();
- __ cmp(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Handle<Object> other_nil_value = instr->nil() == kNullValue ?
- factory()->undefined_value() :
- factory()->null_value();
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ cmp(reg, other_nil_value);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2827,9 +2807,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
+ int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
+ no_frame_start = masm_->pc_offset();
}
if (dynamic_frame_alignment_) {
Label no_padding;
@@ -2841,6 +2823,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
EmitReturn(instr, false);
+ if (no_frame_start != -1) {
+ info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
@@ -3340,7 +3325,8 @@ Operand LCodeGen::BuildFastArrayOperand(
uint32_t offset,
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
- int shift_size = ElementsKindToShiftSize(elements_kind);
+ int element_shift_size = ElementsKindToShiftSize(elements_kind);
+ int shift_size = element_shift_size;
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
@@ -3358,7 +3344,7 @@ Operand LCodeGen::BuildFastArrayOperand(
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
- offset + (additional_index << shift_size));
+ offset + (additional_index << element_shift_size));
}
}
@@ -3514,6 +3500,11 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
}
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ int3();
+}
+
+
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
EmitPushTaggedOperand(argument);
@@ -4239,6 +4230,19 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DeoptimizeIf(overflow, instr->environment());
}
}
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (IsInteger32(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ __ test(value, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ }
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(instr->is_in_object());
@@ -6069,108 +6073,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Handle<FixedArray> literals = instr->hydrogen()->literals();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ebx, Map::kElementsKindMask);
- __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call and pick the right
- // runtime function or stub to call. Boilerplate already exists,
- // constant elements are never accessed, pass an empty fixed array.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- __ LoadHeapObject(eax, literals);
- __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(ecx, Immediate(isolate()->factory()->empty_fixed_array()));
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(isolate()->factory()->empty_fixed_array()));
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(isolate()->factory()->empty_fixed_array()));
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- __ LoadHeapObject(eax, literals);
- __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(ecx, Immediate(isolate()->factory()->empty_fixed_array()));
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Handle<FixedArray> literals = instr->hydrogen()->literals();
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
-
- // Set up the parameters to the stub/runtime call and pick the right
- // runtime function or stub to call.
- int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
- instr->hydrogen()->depth() > 1) {
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- __ LoadHeapObject(eax, literals);
- __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(ecx, Immediate(constant_properties));
- __ mov(edx, Immediate(Smi::FromInt(flags)));
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ push(eax);
@@ -6399,7 +6301,11 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
+ if (instr->hydrogen_value()->IsSoftDeoptimize()) {
+ SoftDeoptimize(instr->environment());
+ } else {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 1fea25bdbf..9f8d4fd363 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -276,7 +276,11 @@ class LCodeGen BASE_EMBEDDED {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void SoftDeoptimize(LEnvironment* environment);
void AddToTranslation(Translation* translation,
LOperand* op,
@@ -397,23 +401,11 @@ class LCodeGen BASE_EMBEDDED {
MacroAssembler* const masm_;
CompilationInfo* const info_;
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 6c9098e757..dec5697f87 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -235,15 +235,6 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
value()->PrintTo(stream);
@@ -1058,6 +1049,11 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
}
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1538,7 +1534,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
}
return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
+ instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kCanOverflow))
? AssignEnvironment(result)
: result;
} else if (instr->representation().IsTagged()) {
@@ -1732,13 +1729,6 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- // We only need a temp register for non-strict compare.
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1855,6 +1845,7 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegister(instr->index());
ASSERT(ecx.is_byte_register());
+ // TODO(titzer): the machine code for this instruction overwrites ecx! fix!
LOperand* value = UseFixed(instr->value(), ecx);
LSeqStringSetChar* result =
new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
@@ -2457,7 +2448,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LStoreNamedField* result =
new(zone()) LStoreNamedField(obj, val, temp, temp_map);
- if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject())) {
return AssignEnvironment(result);
}
return result;
@@ -2530,20 +2523,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 8202418918..b32ead9138 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -50,7 +50,6 @@ class LCodeGen;
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLiteral) \
V(BitI) \
V(BitNotI) \
V(BoundsCheck) \
@@ -85,6 +84,7 @@ class LCodeGen;
V(ConstantI) \
V(ConstantT) \
V(Context) \
+ V(DebugBreak) \
V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
@@ -109,7 +109,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
@@ -145,7 +144,6 @@ class LCodeGen;
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -572,6 +570,12 @@ class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
};
+class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
class LModI: public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -834,26 +838,6 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -2565,32 +2549,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 1> {
};
-class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArrayLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LObjectLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 175b1ca1d2..5c18cae461 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1954,14 +1954,14 @@ static const bool kReturnHandlesDirectly = false;
#endif
-Operand ApiParameterOperand(int index) {
- return Operand(
- esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
+Operand ApiParameterOperand(int index, bool returns_handle) {
+ int offset = (index +(kReturnHandlesDirectly || !returns_handle ? 0 : 1));
+ return Operand(esp, offset * kPointerSize);
}
-void MacroAssembler::PrepareCallApiFunction(int argc) {
- if (kReturnHandlesDirectly) {
+void MacroAssembler::PrepareCallApiFunction(int argc, bool returns_handle) {
+ if (kReturnHandlesDirectly || !returns_handle) {
EnterApiExitFrame(argc);
// When handles are returned directly we don't have to allocate extra
// space for and pass an out parameter.
@@ -1990,7 +1990,9 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
+ int stack_space,
+ bool returns_handle,
+ int return_value_offset) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
@@ -2026,23 +2028,29 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
PopSafepointRegisters();
}
- if (!kReturnHandlesDirectly) {
- // PrepareCallApiFunction saved pointer to the output slot into
- // callee-save register esi.
- mov(eax, Operand(esi, 0));
- }
-
- Label empty_handle;
Label prologue;
+ if (returns_handle) {
+ if (!kReturnHandlesDirectly) {
+ // PrepareCallApiFunction saved pointer to the output slot into
+ // callee-save register esi.
+ mov(eax, Operand(esi, 0));
+ }
+ Label empty_handle;
+ // Check if the result handle holds 0.
+ test(eax, eax);
+ j(zero, &empty_handle);
+ // It was non-zero. Dereference to get the result value.
+ mov(eax, Operand(eax, 0));
+ jmp(&prologue);
+ bind(&empty_handle);
+ }
+ // Load the value from ReturnValue
+ mov(eax, Operand(ebp, return_value_offset * kPointerSize));
+
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
- // Check if the result handle holds 0.
- test(eax, eax);
- j(zero, &empty_handle);
- // It was non-zero. Dereference to get the result value.
- mov(eax, Operand(eax, 0));
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
@@ -2098,11 +2106,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
- bind(&empty_handle);
- // It was zero; the result is undefined.
- mov(eax, isolate()->factory()->undefined_value());
- jmp(&prologue);
-
bind(&promote_scheduled_exception);
TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 519652ac3f..e7a075d10d 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -769,13 +769,16 @@ class MacroAssembler: public Assembler {
// Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
// etc. Saves context (esi). If space was reserved for return value then
// stores the pointer to the reserved slot into esi.
- void PrepareCallApiFunction(int argc);
+ void PrepareCallApiFunction(int argc, bool returns_handle);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers ebx, edi and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address, int stack_space);
+ void CallApiFunctionAndReturn(Address function_address,
+ int stack_space,
+ bool returns_handle,
+ int return_value_offset_from_ebp);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -1010,7 +1013,7 @@ inline Operand GlobalObjectOperand() {
// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index);
+Operand ApiParameterOperand(int index, bool returns_handle);
#ifdef GENERATED_CODE_COVERAGE
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 9623b9a520..88ea4b2dc8 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -420,7 +420,7 @@ static void CompileCallLoadPropertyWithInterceptor(
// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
+static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to API function in the
@@ -469,10 +469,11 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// (first fast api call extra argument)
// -- esp[12] : api call data
// -- esp[16] : isolate
- // -- esp[20] : last argument
+ // -- esp[20] : ReturnValue
+ // -- esp[24] : last argument
// -- ...
- // -- esp[(argc + 4) * 4] : first argument
- // -- esp[(argc + 5) * 4] : receiver
+ // -- esp[(argc + 5) * 4] : first argument
+ // -- esp[(argc + 6) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -492,9 +493,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
}
__ mov(Operand(esp, 4 * kPointerSize),
Immediate(reinterpret_cast<int>(masm->isolate())));
+ __ mov(Operand(esp, 5 * kPointerSize),
+ masm->isolate()->factory()->undefined_value());
// Prepare arguments.
- __ lea(eax, Operand(esp, 4 * kPointerSize));
+ STATIC_ASSERT(kFastApiCallArguments == 5);
+ __ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
@@ -502,23 +506,31 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(masm->isolate(),
+ reinterpret_cast<void*>(function_address));
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace, returns_handle);
- __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
+ // v8::Arguments::implicit_args_.
+ __ mov(ApiParameterOperand(1, returns_handle), eax);
__ add(eax, Immediate(argc * kPointerSize));
- __ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_.
- __ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_.
+ // v8::Arguments::values_.
+ __ mov(ApiParameterOperand(2, returns_handle), eax);
+ // v8::Arguments::length_.
+ __ Set(ApiParameterOperand(3, returns_handle), Immediate(argc));
// v8::Arguments::is_construct_call_.
- __ Set(ApiParameterOperand(4), Immediate(0));
+ __ Set(ApiParameterOperand(4, returns_handle), Immediate(0));
// v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(1));
- __ mov(ApiParameterOperand(0), eax);
+ __ lea(eax, ApiParameterOperand(1, returns_handle));
+ __ mov(ApiParameterOperand(0, returns_handle), eax);
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ CallApiFunctionAndReturn(function_address,
- argc + kFastApiCallArguments + 1);
+ argc + kFastApiCallArguments + 1,
+ returns_handle,
+ kFastApiCallArguments + 1);
}
@@ -826,6 +838,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
@@ -996,6 +1010,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
@@ -1361,6 +1377,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
__ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
@@ -1371,22 +1388,29 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ push(scratch3()); // Restore return address.
- // 4 elements array for v8::Arguments::values_, handler for name and pointer
+ // array for v8::Arguments::values_, handler for name and pointer
// to the values (it considered as smi in GC).
- const int kStackSpace = 6;
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
const int kApiArgc = 2;
- __ PrepareCallApiFunction(kApiArgc);
- __ mov(ApiParameterOperand(0), ebx); // name.
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(isolate(),
+ reinterpret_cast<void*>(getter_address));
+ __ PrepareCallApiFunction(kApiArgc, returns_handle);
+ __ mov(ApiParameterOperand(0, returns_handle), ebx); // name.
__ add(ebx, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(1), ebx); // arguments pointer.
+ __ mov(ApiParameterOperand(1, returns_handle), ebx); // arguments pointer.
// Emitting a stub call may try to allocate (if the code is not
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ CallApiFunctionAndReturn(getter_address, kStackSpace);
+
+ __ CallApiFunctionAndReturn(getter_address,
+ kStackSpace,
+ returns_handle,
+ 4);
}
@@ -2489,7 +2513,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
name, depth, &miss);
// Move the return address on top of the stack.
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 31845f2142..ea0c1fbbe1 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -2020,7 +2020,14 @@ MaybeObject* KeyedStoreIC::Store(State state,
bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
!(FLAG_harmony_observation && object->IsJSObject() &&
- JSObject::cast(*object)->map()->is_observed());
+ JSObject::cast(*object)->map()->is_observed());
+ if (use_ic && !object->IsSmi()) {
+ // Don't use ICs for maps of the objects in Array's prototype chain. We
+ // expect to be able to trap element sets to objects with those maps in the
+ // runtime to enable optimization of element hole access.
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
+ }
ASSERT(!(use_ic && object->IsJSGlobalProxy()));
if (use_ic) {
@@ -2881,25 +2888,17 @@ RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
}
-Code* CompareNilIC::GetRawUninitialized(EqualityKind kind,
- NilValue nil) {
- CompareNilICStub stub(kind, nil);
- Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code, Isolate::Current()));
- return code;
-}
-
-
void CompareNilIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
Code::ExtraICState state = target->extended_extra_ic_state();
- EqualityKind kind =
- CompareNilICStub::EqualityKindFromExtraICState(state);
- NilValue nil =
- CompareNilICStub::NilValueFromExtraICState(state);
+ CompareNilICStub stub(state, CompareNilICStub::CODE_STUB_IS_MISS);
+ stub.ClearTypes();
+
+ Code* code = NULL;
+ CHECK(stub.FindCodeInCache(&code, target->GetIsolate()));
- SetTargetAtAddress(address, GetRawUninitialized(kind, nil));
+ SetTargetAtAddress(address, code);
}
@@ -2923,28 +2922,24 @@ MaybeObject* CompareNilIC::DoCompareNilSlow(EqualityKind kind,
MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ CompareNilICStub stub(extra_ic_state);
+
// Extract the current supported types from the patched IC and calculate what
// types must be supported as a result of the miss.
- bool already_monomorphic;
- CompareNilICStub::Types types =
- CompareNilICStub::GetPatchedICFlags(extra_ic_state,
- object, &already_monomorphic);
+ bool already_monomorphic = stub.IsMonomorphic();
+
+ stub.Record(object);
- EqualityKind kind =
- CompareNilICStub::EqualityKindFromExtraICState(extra_ic_state);
- NilValue nil =
- CompareNilICStub::NilValueFromExtraICState(extra_ic_state);
+ EqualityKind kind = stub.GetKind();
+ NilValue nil = stub.GetNilValue();
// Find or create the specialized stub to support the new set of types.
- CompareNilICStub stub(kind, nil, types);
Handle<Code> code;
- if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) {
+ if (stub.IsMonomorphic()) {
Handle<Map> monomorphic_map(already_monomorphic
? target()->FindFirstMap()
: HeapObject::cast(*object)->map());
- code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map,
- nil,
- stub.GetTypes());
+ code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, stub);
} else {
code = stub.GetCode(isolate());
}
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 739f34ce56..a044f0cc9d 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -787,8 +787,6 @@ class CompareNilIC: public IC {
static Handle<Code> GetUninitialized();
- static Code* GetRawUninitialized(EqualityKind kind, NilValue nil);
-
static void Clear(Address address, Code* target);
void patch(Code* code);
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 79a9020bfc..8ae0c74d0f 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -835,7 +835,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
}
-void Isolate::PrintStack() {
+void Isolate::PrintStack(FILE* out) {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
@@ -850,7 +850,7 @@ void Isolate::PrintStack() {
StringStream accumulator(allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
- accumulator.OutputToStdOut();
+ accumulator.OutputToFile(out);
InitializeLoggingAndCounters();
accumulator.Log();
incomplete_message_ = NULL;
@@ -865,7 +865,7 @@ void Isolate::PrintStack() {
"\n\nAttempt to print stack while printing stack (double fault)\n");
OS::PrintError(
"If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message_->OutputToStdOut();
+ incomplete_message_->OutputToFile(out);
}
}
@@ -1752,7 +1752,8 @@ Isolate::Isolate()
deferred_handles_head_(NULL),
optimizing_compiler_thread_(this),
marking_thread_(NULL),
- sweeper_thread_(NULL) {
+ sweeper_thread_(NULL),
+ callback_table_(NULL) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -2443,6 +2444,44 @@ HTracer* Isolate::GetHTracer() {
}
+Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
+ Context* native_context = context()->native_context();
+ Object* maybe_map_array = native_context->js_array_maps();
+ if (!maybe_map_array->IsUndefined()) {
+ Object* maybe_transitioned_map =
+ FixedArray::cast(maybe_map_array)->get(kind);
+ if (!maybe_transitioned_map->IsUndefined()) {
+ return Map::cast(maybe_transitioned_map);
+ }
+ }
+ return NULL;
+}
+
+
+bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
+ Map* root_array_map =
+ get_initial_js_array_map(GetInitialFastElementsKind());
+ ASSERT(root_array_map != NULL);
+ JSObject* initial_array_proto = JSObject::cast(*initial_array_prototype());
+
+ // Check that the array prototype hasn't been altered WRT empty elements.
+ if (root_array_map->prototype() != initial_array_proto) return false;
+ if (initial_array_proto->elements() != heap()->empty_fixed_array()) {
+ return false;
+ }
+
+ // Check that the object prototype hasn't been altered WRT empty elements.
+ JSObject* initial_object_proto = JSObject::cast(*initial_object_prototype());
+ Object* root_array_map_proto = initial_array_proto->GetPrototype();
+ if (root_array_map_proto != initial_object_proto) return false;
+ if (initial_object_proto->elements() != heap()->empty_fixed_array()) {
+ return false;
+ }
+
+ return initial_object_proto->GetPrototype()->IsNull();
+}
+
+
CodeStubInterfaceDescriptor*
Isolate::code_stub_interface_descriptor(int index) {
return code_stub_interface_descriptors_ + index;
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 71d86f4ac0..de7e35e591 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -51,6 +51,7 @@ namespace v8 {
namespace internal {
class Bootstrapper;
+class CallbackTable;
class CodeGenerator;
class CodeRange;
struct CodeStubInterfaceDescriptor;
@@ -724,7 +725,7 @@ class Isolate {
void PrintCurrentStackTrace(FILE* out);
void PrintStackTrace(FILE* out, char* thread_data);
void PrintStack(StringStream* accumulator);
- void PrintStack();
+ void PrintStack(FILE* out);
Handle<String> StackTraceString();
NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
Object* object,
@@ -835,6 +836,9 @@ class Isolate {
#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
Handle<type> name() { \
return Handle<type>(context()->native_context()->name(), this); \
+ } \
+ bool is_##name(type* value) { \
+ return context()->native_context()->is_##name(value); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
@@ -1067,6 +1071,10 @@ class Isolate {
date_cache_ = date_cache;
}
+ Map* get_initial_js_array_map(ElementsKind kind);
+
+ bool IsFastArrayConstructorPrototypeChainIntact();
+
CodeStubInterfaceDescriptor*
code_stub_interface_descriptor(int index);
@@ -1095,6 +1103,13 @@ class Isolate {
return sweeper_thread_;
}
+ CallbackTable* callback_table() {
+ return callback_table_;
+ }
+ void set_callback_table(CallbackTable* callback_table) {
+ callback_table_ = callback_table;
+ }
+
HStatistics* GetHStatistics();
HTracer* GetHTracer();
@@ -1332,6 +1347,7 @@ class Isolate {
OptimizingCompilerThread optimizing_compiler_thread_;
MarkingThread** marking_thread_;
SweeperThread** sweeper_thread_;
+ CallbackTable* callback_table_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index e52d0f3969..00fa432686 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1016,6 +1016,7 @@ static const char* ComputeMarker(Code* code) {
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
+ CompilationInfo* info,
Name* name) {
if (!is_logging_code_events()) return;
if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
@@ -1093,6 +1094,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
+ CompilationInfo* info,
Name* source, int line) {
if (!is_logging_code_events()) return;
if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
@@ -1735,20 +1737,20 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
PROFILE(isolate_,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared,
+ *code, *shared, NULL,
*script_name, line_num + 1));
} else {
// Can't distinguish eval and script here, so always use Script.
PROFILE(isolate_,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *code, *shared, *script_name));
+ *code, *shared, NULL, *script_name));
}
} else {
PROFILE(isolate_,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared, *func_name));
+ *code, *shared, NULL, *func_name));
}
} else if (shared->IsApiFunction()) {
// API function.
@@ -1763,7 +1765,7 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
} else {
PROFILE(isolate_,
CodeCreateEvent(
- Logger::LAZY_COMPILE_TAG, *code, *shared, *func_name));
+ Logger::LAZY_COMPILE_TAG, *code, *shared, NULL, *func_name));
}
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 8db13df805..531f1de0bd 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -79,6 +79,7 @@ class Ticker;
class Isolate;
class PositionsRecorder;
class CpuProfiler;
+class CompilationInfo;
#undef LOG
#define LOG(isolate, Call) \
@@ -235,10 +236,12 @@ class Logger {
void CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
+ CompilationInfo* info,
Name* name);
void CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
+ CompilationInfo* info,
Name* source, int line);
void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
void CodeMovingGCEvent();
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 62dee48472..29d8616181 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -1054,6 +1054,70 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
}
+void CodeFlusher::ProcessOptimizedCodeMaps() {
+ static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
+ static const int kEntryLength = SharedFunctionInfo::kEntryLength;
+ static const int kContextOffset = 0;
+ static const int kCodeOffset = 1;
+ static const int kLiteralsOffset = 2;
+ STATIC_ASSERT(kEntryLength == 3);
+
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+ SharedFunctionInfo* next_holder;
+ while (holder != NULL) {
+ next_holder = GetNextCodeMap(holder);
+ ClearNextCodeMap(holder);
+
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ int new_length = kEntriesStart;
+ int old_length = code_map->length();
+ for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
+ Code* code = Code::cast(code_map->get(i + kCodeOffset));
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ if (!code_mark.Get()) {
+ continue;
+ }
+
+ // Update and record the context slot in the optimizled code map.
+ Object** context_slot = HeapObject::RawField(code_map,
+ FixedArray::OffsetOfElementAt(new_length));
+ code_map->set(new_length++, code_map->get(i + kContextOffset));
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(context_slot, context_slot, *context_slot);
+
+ // Update and record the code slot in the optimized code map.
+ Object** code_slot = HeapObject::RawField(code_map,
+ FixedArray::OffsetOfElementAt(new_length));
+ code_map->set(new_length++, code_map->get(i + kCodeOffset));
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(code_slot, code_slot, *code_slot);
+
+ // Update and record the literals slot in the optimized code map.
+ Object** literals_slot = HeapObject::RawField(code_map,
+ FixedArray::OffsetOfElementAt(new_length));
+ code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(literals_slot, literals_slot, *literals_slot);
+ }
+
+ // Trim the optimized code map if entries have been removed.
+ if (new_length < old_length) {
+ holder->TrimOptimizedCodeMap(old_length - new_length);
+ }
+
+ holder = next_holder;
+ }
+
+ optimized_code_map_holder_head_ = NULL;
+}
+
+
void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
// Make sure previous flushing decisions are revisited.
isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
@@ -1112,6 +1176,36 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
}
+void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+ ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
+ get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
+
+ // Make sure previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
+
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+ SharedFunctionInfo* next_holder;
+ if (holder == code_map_holder) {
+ next_holder = GetNextCodeMap(code_map_holder);
+ optimized_code_map_holder_head_ = next_holder;
+ ClearNextCodeMap(code_map_holder);
+ } else {
+ while (holder != NULL) {
+ next_holder = GetNextCodeMap(holder);
+
+ if (next_holder == code_map_holder) {
+ next_holder = GetNextCodeMap(code_map_holder);
+ SetNextCodeMap(holder, next_holder);
+ ClearNextCodeMap(code_map_holder);
+ break;
+ }
+
+ holder = next_holder;
+ }
+ }
+}
+
+
void CodeFlusher::EvictJSFunctionCandidates() {
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
@@ -1136,6 +1230,18 @@ void CodeFlusher::EvictSharedFunctionInfoCandidates() {
}
+void CodeFlusher::EvictOptimizedCodeMaps() {
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+ SharedFunctionInfo* next_holder;
+ while (holder != NULL) {
+ next_holder = GetNextCodeMap(holder);
+ EvictOptimizedCodeMap(holder);
+ holder = next_holder;
+ }
+ ASSERT(optimized_code_map_holder_head_ == NULL);
+}
+
+
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
Heap* heap = isolate_->heap();
@@ -3955,6 +4061,10 @@ void MarkCompactCollector::EnableCodeFlushing(bool enable) {
delete code_flusher_;
code_flusher_ = NULL;
}
+
+ if (FLAG_trace_code_flushing) {
+ PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
+ }
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index cbc8f410c6..0f20440062 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -406,9 +406,10 @@ class SlotsBuffer {
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in two ways:
+// be unreachable. Code objects can be referenced in three ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
+// - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bailout
// into the unoptimized code again during deoptimization.
@@ -417,7 +418,8 @@ class CodeFlusher {
explicit CodeFlusher(Isolate* isolate)
: isolate_(isolate),
jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL) {}
+ shared_function_info_candidates_head_(NULL),
+ optimized_code_map_holder_head_(NULL) {}
void AddCandidate(SharedFunctionInfo* shared_info) {
if (GetNextCandidate(shared_info) == NULL) {
@@ -434,15 +436,25 @@ class CodeFlusher {
}
}
+ void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+ if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+ SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+ optimized_code_map_holder_head_ = code_map_holder;
+ }
+ }
+
+ void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictCandidate(SharedFunctionInfo* shared_info);
void EvictCandidate(JSFunction* function);
void ProcessCandidates() {
+ ProcessOptimizedCodeMaps();
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
void EvictAllCandidates() {
+ EvictOptimizedCodeMaps();
EvictJSFunctionCandidates();
EvictSharedFunctionInfoCandidates();
}
@@ -450,8 +462,10 @@ class CodeFlusher {
void IteratePointersToFromSpace(ObjectVisitor* v);
private:
+ void ProcessOptimizedCodeMaps();
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
+ void EvictOptimizedCodeMaps();
void EvictJSFunctionCandidates();
void EvictSharedFunctionInfoCandidates();
@@ -489,9 +503,27 @@ class CodeFlusher {
candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
+ static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+ return reinterpret_cast<SharedFunctionInfo*>(next_map);
+ }
+
+ static void SetNextCodeMap(SharedFunctionInfo* holder,
+ SharedFunctionInfo* next_holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+ }
+
+ static void ClearNextCodeMap(SharedFunctionInfo* holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+ }
+
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
+ SharedFunctionInfo* optimized_code_map_holder_head_;
DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index b9bce1ebd1..296965d37d 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -98,7 +98,10 @@ var kMessages = {
observe_non_object: ["Object.", "%0", " cannot ", "%0", " non-object"],
observe_non_function: ["Object.", "%0", " cannot deliver to non-function"],
observe_callback_frozen: ["Object.observe cannot deliver to a frozen function object"],
+ observe_invalid_accept: ["Object.observe accept must be an array of strings."],
observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
+ observe_perform_non_string: ["Invalid non-string changeType"],
+ observe_perform_non_function: ["Cannot perform non-function"],
observe_notify_non_notifier: ["notify called on non-notifier object"],
proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
parameterless_typed_array_constr:
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 700bcc4a53..973b1bb0be 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -1378,6 +1378,11 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index f1c2553d1c..7c09bb3e93 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -309,8 +309,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// The optimized code map must never be empty, so check the first elements.
Label install_optimized;
// Speculatively move code object into t0.
- __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
- __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
+ __ lw(t0, FieldMemOperand(a1, SharedFunctionInfo::kFirstCodeSlot));
+ __ lw(t1, FieldMemOperand(a1, SharedFunctionInfo::kFirstContextSlot));
__ Branch(&install_optimized, eq, a2, Operand(t1));
// Iterate through the rest of map backwards. t0 holds an index as a Smi.
@@ -318,11 +318,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
__ bind(&loop);
// Do not double check first entry.
-
__ Branch(&install_unoptimized, eq, t0,
- Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ Subu(t0, t0, Operand(
- Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
+ __ Subu(t0, t0, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t1, t1, Operand(at));
@@ -2400,8 +2398,12 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -2426,7 +2428,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ GetObjectType(right, a2, a2);
__ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub string_add_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
@@ -2746,8 +2749,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -2794,8 +2801,12 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -2818,8 +2829,12 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -2835,7 +2850,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ GetObjectType(left, a2, a2);
__ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ StringAddStub string_add_left_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
@@ -2845,7 +2861,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ GetObjectType(right, a2, a2);
__ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ StringAddStub string_add_right_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
@@ -5139,6 +5156,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ bind(&megamorphic);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
@@ -6181,7 +6199,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
+ if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
__ JumpIfEitherSmi(a0, a1, &call_runtime);
// Load instance types.
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
@@ -6470,15 +6488,49 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ if ((flags_ & ERECT_FRAME) != 0) {
+ GenerateRegisterArgsPop(masm);
+ // Build a frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ __ CallRuntime(Runtime::kStringAdd, 2);
+ }
+ __ Ret();
+ } else {
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ }
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ if ((flags_ & ERECT_FRAME) != 0) {
+ GenerateRegisterArgsPop(masm);
+ // Build a frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
+ }
+ __ Ret();
+ } else {
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
}
}
+void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(a0);
+ __ push(a1);
+}
+
+
+void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
+ __ pop(a1);
+ __ pop(a0);
+}
+
+
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 3a84644a18..ec7d147988 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -212,11 +212,13 @@ class StringHelper : public AllStatic {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_ADD_FLAGS = 1 << 0,
// Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
// Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
+ // Stub needs a frame before calling the runtime
+ ERECT_FRAME = 1 << 3,
// Omit both string checks in stub.
NO_STRING_CHECK_IN_STUB =
NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
@@ -242,6 +244,9 @@ class StringAddStub: public PlatformCodeStub {
Register scratch4,
Label* slow);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateRegisterArgsPop(MacroAssembler* masm);
+
const StringAddFlags flags_;
};
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index ecf4087328..31fad2bc45 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -1,3 +1,4 @@
+
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -51,14 +52,13 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
+ // The optimized code is going to be patched, so we cannot use it any more.
+ function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
+
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -266,7 +266,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
@@ -360,185 +360,12 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
-// This code is very similar to ia32/arm code, but relies on register names
-// (fp, sp) and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -583,6 +410,12 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on MIPS in the input frame.
+ return false;
+}
+
+
#define __ masm()->
@@ -627,7 +460,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the address of the location in the code object if possible (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register t0.
- if (type() == EAGER) {
+ if (type() == EAGER || type() == SOFT) {
__ mov(a3, zero_reg);
// Correct one word for bailout id.
__ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
@@ -690,7 +523,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id, eventually return address, and the saved registers
// from the stack.
- if (type() == EAGER || type() == OSR) {
+ if (type() == EAGER || type() == SOFT || type() == OSR) {
__ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
@@ -807,7 +640,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
Label start;
__ bind(&start);
- if (type() != EAGER) {
+ if (type() != EAGER && type() != SOFT) {
// Emulate ia32 like call by pushing return address to stack.
__ addiu(sp, sp, -2 * kPointerSize);
__ sw(ra, MemOperand(sp, 1 * kPointerSize));
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index 79505ae9cb..540caa9d0d 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -47,6 +47,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) {
}
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+
+
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 872af86a95..bdfa43b2e7 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -179,6 +179,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
// Adjust fp to point to caller's fp.
__ Addu(fp, sp, Operand(2 * kPointerSize));
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -438,9 +439,11 @@ void FullCodeGenerator::EmitReturnSequence() {
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
+ int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
masm_->Addu(sp, sp, Operand(sp_delta));
masm_->Jump(ra);
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
#ifdef DEBUG
@@ -1965,8 +1968,108 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING:
- UNIMPLEMENTED();
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&l_send);
+
+ // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ __ mov(a0, v0);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ push(a3); // iter
+ __ push(a0); // exception
+ __ mov(a0, a3); // iter
+ __ push(a0); // push LoadIC state
+ __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
+ Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(throw_ic); // iter.throw in a0
+ __ mov(a0, v0);
+ __ Addu(sp, sp, Operand(kPointerSize)); // drop LoadIC state
+ __ jmp(&l_call);
+
+ // try { received = yield result.value }
+ __ bind(&l_try);
+ __ pop(a0); // result.value
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(a0); // result.value
+ __ lw(a3, MemOperand(sp, (0 + 1) * kPointerSize + handler_size)); // g
+ __ push(a3); // g
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ mov(a0, v0);
+ __ lw(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&l_resume, ne, a0, Operand(at));
+ EmitReturnIteratorResult(false);
+ __ mov(a0, v0);
+ __ bind(&l_resume); // received in a0
+ __ PopTryHandler();
+
+ // receiver = iter; f = iter.send; arg = received;
+ __ bind(&l_send);
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ push(a3); // iter
+ __ push(a0); // received
+ __ mov(a0, a3); // iter
+ __ push(a0); // push LoadIC state
+ __ LoadRoot(a2, Heap::ksend_stringRootIndex); // "send"
+ Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(send_ic); // iter.send in a0
+ __ mov(a0, v0);
+ __ Addu(sp, sp, Operand(kPointerSize)); // drop LoadIC state
+
+ // result = f.call(receiver, arg);
+ __ bind(&l_call);
+ Label l_call_runtime;
+ __ JumpIfSmi(a0, &l_call_runtime);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&l_call_runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
+ __ mov(a1, a0);
+ ParameterCount count(1);
+ __ InvokeFunction(a1, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&l_loop);
+ __ bind(&l_call_runtime);
+ __ push(a0);
+ __ CallRuntime(Runtime::kCall, 3);
+
+ // val = result.value; if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ __ mov(a0, v0);
+ // result.value
+ __ push(a0); // save result
+ __ LoadRoot(a2, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in a0
+ __ mov(a0, v0);
+ __ pop(a1); // result
+ __ push(a0); // result.value
+ __ mov(a0, a1); // result
+ __ push(a0); // push LoadIC state
+ __ LoadRoot(a2, Heap::kdone_stringRootIndex); // "done"
+ Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(done_ic); // result.done in v0
+ __ Addu(sp, sp, Operand(kPointerSize)); // drop LoadIC state
+ ToBooleanStub stub(v0);
+ __ CallStub(&stub);
+ __ Branch(&l_try, eq, v0, Operand(zero_reg));
+
+ // result.value
+ __ pop(v0); // result.value
+ context()->DropAndPlug(2, v0); // drop iter and g
+ break;
+ }
}
}
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 02055a4483..8b2b3254f1 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -1454,7 +1454,7 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 77e4216f19..ae0d6283f4 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -95,6 +95,12 @@ void LCodeGen::FinishCode(Handle<Code> code) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
}
+ if (graph()->depends_on_empty_array_proto_elements()) {
+ isolate()->initial_object_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ isolate()->initial_array_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ }
}
@@ -173,6 +179,7 @@ bool LCodeGen::GeneratePrologue() {
__ Addu(fp, sp, Operand(2 * kPointerSize));
}
frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -343,9 +350,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
Address entry = deopt_jump_table_[i].address;
- bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
@@ -354,7 +359,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (deopt_jump_table_[i].needs_frame) {
- if (is_lazy_deopt) {
+ if (type == Deoptimizer::LAZY) {
if (needs_frame_is_call.is_bound()) {
__ Branch(&needs_frame_is_call);
} else {
@@ -386,7 +391,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
}
} else {
- if (is_lazy_deopt) {
+ if (type == Deoptimizer::LAZY) {
__ Call(t9);
} else {
__ Jump(t9);
@@ -798,15 +803,13 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
void LCodeGen::DeoptimizeIf(Condition cc,
LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type,
Register src1,
const Operand& src2) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -844,9 +847,11 @@ void LCodeGen::DeoptimizeIf(Condition cc,
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+ (deopt_jump_table_.last().bailout_type != bailout_type) ||
(deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
deopt_jump_table_.Add(table_entry, zone());
}
__ Branch(&deopt_jump_table_.last().label, cc, src1, src2);
@@ -854,6 +859,25 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Register src1,
+ const Operand& src2) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(cc, environment, bailout_type, src1, src2);
+}
+
+
+void LCodeGen::SoftDeoptimize(LEnvironment* environment,
+ Register src1,
+ const Operand& src2) {
+ ASSERT(!info()->IsStub());
+ DeoptimizeIf(al, environment, Deoptimizer::SOFT, src1, src2);
+}
+
+
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1803,6 +1827,11 @@ void LCodeGen::EmitBranchF(int left_block, int right_block,
}
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ stop("LDebugBreak");
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2027,48 +2056,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(at, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, eq, reg, Operand(at));
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
- __ LoadRoot(at, other_nil_value); // In the delay slot.
- __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
- __ JumpIfSmi(reg, false_label); // In the delay slot.
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(scratch, scratch, 1 << Map::kIsUndetectable);
- EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
- }
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
@@ -2547,9 +2534,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
count++;
}
}
+ int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(sp, fp);
__ Pop(ra, fp);
+ no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
@@ -2566,6 +2555,10 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ Jump(ra);
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
@@ -3911,6 +3904,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
}
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ And(scratch, value, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+ }
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(instr->is_in_object());
@@ -5180,95 +5179,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals = instr->hydrogen()->literals();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
- // Load map into a2.
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- DeoptimizeIf(ne,
- instr->environment(),
- a2,
- Operand(boilerplate_elements_kind));
- }
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(a3, literals);
- __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- __ Push(a3, a2, a1);
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ Push(a3, a2, a1);
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- Handle<FixedArray> literals = instr->hydrogen()->literals();
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(a3, literals);
- __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a1, Operand(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- __ li(a0, Operand(Smi::FromInt(flags)));
-
- // Pick the right runtime function or stub to call.
- int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
- instr->hydrogen()->depth() > 1) {
- __ Push(a3, a2, a1, a0);
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ Push(a3, a2, a1, a0);
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -5547,7 +5457,11 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ if (instr->hydrogen_value()->IsSoftDeoptimize()) {
+ SoftDeoptimize(instr->environment(), zero_reg, Operand(zero_reg));
+ } else {
+ DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ }
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index a36059b53f..90e7bf666b 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -283,8 +283,16 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc,
LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
+ void SoftDeoptimize(LEnvironment* environment,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
void AddToTranslation(Translation* translation,
LOperand* op,
@@ -385,18 +393,6 @@ class LCodeGen BASE_EMBEDDED {
int* offset,
AllocationSiteMode mode);
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
void EnsureSpaceForLazyDeopt();
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
@@ -414,7 +410,7 @@ class LCodeGen BASE_EMBEDDED {
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> deopt_jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 28309e2df9..5ca8cd9b96 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -208,6 +208,10 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
@@ -217,15 +221,6 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
value()->PrintTo(stream);
@@ -1377,7 +1372,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
}
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
+ instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kCanOverflow)) {
return AssignEnvironment(DefineAsRegister(mod));
} else {
return DefineAsRegister(mod);
@@ -1591,12 +1587,6 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1709,7 +1699,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegister(instr->index());
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseTempRegister(instr->value());
LSeqStringSetChar* result =
new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
return DefineAsRegister(result);
@@ -2208,7 +2198,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject())) {
return AssignEnvironment(result);
}
return result;
@@ -2273,16 +2265,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, v0), instr);
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index bb8c9930c0..1abea90141 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -56,7 +56,6 @@ class LCodeGen;
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLiteral) \
V(BitI) \
V(BitNotI) \
V(BoundsCheck) \
@@ -90,6 +89,7 @@ class LCodeGen;
V(ConstantI) \
V(ConstantT) \
V(Context) \
+ V(DebugBreak) \
V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
@@ -114,7 +114,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
@@ -150,7 +149,6 @@ class LCodeGen;
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -658,6 +656,12 @@ class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
};
+class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -849,24 +853,6 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsNilAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -2403,20 +2389,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 81e9ec980e..cea4bc4683 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -3929,7 +3929,9 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
+ int stack_space,
+ bool returns_handle,
+ int return_value_offset_from_fp) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@@ -3978,22 +3980,27 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
PopSafepointRegisters();
}
- // As mentioned above, on MIPS a pointer is returned - we need to dereference
- // it to get the actual return value (which is also a pointer).
- lw(v0, MemOperand(v0));
-
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
+ Label return_value_loaded;
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- Label skip;
- LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- Branch(&skip, eq, v0, Operand(zero_reg));
- lw(a0, MemOperand(v0));
- bind(&skip);
- mov(v0, a0);
+ if (returns_handle) {
+ Label load_return_value;
+
+ // As mentioned above, on MIPS a pointer is returned - we need to
+ // dereference it to get the actual return value (which is also a pointer).
+ lw(v0, MemOperand(v0));
+
+ Branch(&load_return_value, eq, v0, Operand(zero_reg));
+ // Dereference returned value.
+ lw(v0, MemOperand(v0));
+ Branch(&return_value_loaded);
+ bind(&load_return_value);
+ }
+ // Load value from ReturnValue.
+ lw(v0, MemOperand(fp, return_value_offset_from_fp*kPointerSize));
+ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 248e5b4bca..6511223aae 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -1237,7 +1237,10 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
+ void CallApiFunctionAndReturn(ExternalReference function,
+ int stack_space,
+ bool returns_handle,
+ int return_value_offset_from_fp);
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 467345807a..18e78a5abd 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -526,7 +526,7 @@ void MipsDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
@@ -1388,10 +1388,13 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
// This signature supports direct call to accessor getter callback.
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
int32_t arg1);
+typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
+ int32_t arg1);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
@@ -1536,28 +1539,44 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
}
- } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ } else if (
+ redirection->type() == ExternalReference::DIRECT_API_CALL ||
+ redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) {
// See DirectCEntryStub::GenerateCall for explanation of register usage.
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08x\n",
- FUNCTION_ADDR(target), arg1);
+ reinterpret_cast<void*>(external), arg1);
}
- v8::Handle<v8::Value> result = target(arg1);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ v8::Handle<v8::Value> result = target(arg1);
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
+ set_register(v0, arg0);
+ } else {
+ SimulatorRuntimeDirectApiCallNew target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external);
+ target(arg1);
+ }
+ } else if (
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
// See DirectCEntryStub::GenerateCall for explanation of register usage.
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08x %08x\n",
- FUNCTION_ADDR(target), arg1, arg2);
+ reinterpret_cast<void*>(external), arg1, arg2);
+ }
+ if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ v8::Handle<v8::Value> result = target(arg1, arg2);
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
+ set_register(v0, arg0);
+ } else {
+ SimulatorRuntimeDirectGetterCallNew target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
+ target(arg1, arg2);
}
- v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 80ab31a5e4..4a73be2dbe 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -507,6 +507,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
@@ -619,7 +621,6 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
} else {
ASSERT(storage_reg.is(name_reg));
}
- __ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
offset,
name_reg,
@@ -677,6 +678,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
@@ -839,8 +842,7 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-static const int kFastApiCallArguments = 4;
-
+static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to API function in the
// caller's frame.
@@ -869,10 +871,11 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[4] : callee JS function
// -- sp[8] : call data
// -- sp[12] : isolate
- // -- sp[16] : last JS argument
+ // -- sp[16] : ReturnValue
+ // -- sp[20] : last JS argument
// -- ...
- // -- sp[(argc + 3) * 4] : first JS argument
- // -- sp[(argc + 4) * 4] : receiver
+ // -- sp[(argc + 4) * 4] : first JS argument
+ // -- sp[(argc + 5) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -890,13 +893,15 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
}
__ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data and isolate.
+ // Store JS function, call data, isolate and ReturnValue.
__ sw(t1, MemOperand(sp, 1 * kPointerSize));
__ sw(t2, MemOperand(sp, 2 * kPointerSize));
__ sw(t3, MemOperand(sp, 3 * kPointerSize));
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ sw(t1, MemOperand(sp, 4 * kPointerSize));
// Prepare arguments.
- __ Addu(a2, sp, Operand(3 * kPointerSize));
+ __ Addu(a2, sp, Operand(4 * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -927,13 +932,22 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
ApiFunction fun(function_address);
+ ExternalReference::Type type =
+ returns_handle ?
+ ExternalReference::DIRECT_API_CALL :
+ ExternalReference::DIRECT_API_CALL_NEW;
ExternalReference ref =
ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
+ type,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ __ CallApiFunctionAndReturn(ref,
+ kStackUnwindSpace,
+ returns_handle,
+ kFastApiCallArguments + 1);
}
class CallInterceptorCompiler BASE_EMBEDDED {
@@ -1407,12 +1421,14 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
} else {
__ li(scratch3(), Handle<Object>(callback->data(), isolate()));
}
- __ Subu(sp, sp, 4 * kPointerSize);
- __ sw(reg, MemOperand(sp, 3 * kPointerSize));
- __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize));
+ __ Subu(sp, sp, 5 * kPointerSize);
+ __ sw(reg, MemOperand(sp, 4 * kPointerSize));
+ __ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
__ li(scratch3(),
Operand(ExternalReference::isolate_address(isolate())));
- __ sw(scratch3(), MemOperand(sp, 1 * kPointerSize));
+ __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+ __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize));
+ __ sw(scratch4(), MemOperand(sp, 1 * kPointerSize));
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
@@ -1433,12 +1449,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// a2 (second argument - see note above) = AccessorInfo&
__ Addu(a2, sp, kPointerSize);
- const int kStackUnwindSpace = 5;
+ const int kStackUnwindSpace = kFastApiCallArguments + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(isolate(), getter_address);
ApiFunction fun(getter_address);
- ExternalReference ref = ExternalReference(
- &fun, ExternalReference::DIRECT_GETTER_CALL, isolate());
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ ExternalReference::Type type =
+ returns_handle ?
+ ExternalReference::DIRECT_GETTER_CALL :
+ ExternalReference::DIRECT_GETTER_CALL_NEW;
+
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ CallApiFunctionAndReturn(ref,
+ kStackUnwindSpace,
+ returns_handle,
+ 3);
}
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 77409b9574..b28f928a48 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -66,18 +66,147 @@ function CreateObjectInfo(object) {
var info = {
changeObservers: new InternalArray,
notifier: null,
+ inactiveObservers: new InternalArray,
+ performing: { __proto__: null },
+ performingCount: 0,
};
objectInfoMap.set(object, info);
return info;
}
-function ObjectObserve(object, callback) {
+var defaultAcceptTypes = {
+ __proto__: null,
+ 'new': true,
+ 'updated': true,
+ 'deleted': true,
+ 'prototype': true,
+ 'reconfigured': true
+};
+
+function CreateObserver(callback, accept) {
+ var observer = {
+ __proto__: null,
+ callback: callback,
+ accept: defaultAcceptTypes
+ };
+
+ if (IS_UNDEFINED(accept))
+ return observer;
+
+ var acceptMap = { __proto__: null };
+ for (var i = 0; i < accept.length; i++)
+ acceptMap[accept[i]] = true;
+
+ observer.accept = acceptMap;
+ return observer;
+}
+
+function ObserverIsActive(observer, objectInfo) {
+ if (objectInfo.performingCount === 0)
+ return true;
+
+ var performing = objectInfo.performing;
+ for (var type in performing) {
+ if (performing[type] > 0 && observer.accept[type])
+ return false;
+ }
+
+ return true;
+}
+
+function ObserverIsInactive(observer, objectInfo) {
+ return !ObserverIsActive(observer, objectInfo);
+}
+
+function RemoveNullElements(from) {
+ var i = 0;
+ var j = 0;
+ for (; i < from.length; i++) {
+ if (from[i] === null)
+ continue;
+ if (j < i)
+ from[j] = from[i];
+ j++;
+ }
+
+ if (i !== j)
+ from.length = from.length - (i - j);
+}
+
+function RepartitionObservers(conditionFn, from, to, objectInfo) {
+ var anyRemoved = false;
+ for (var i = 0; i < from.length; i++) {
+ var observer = from[i];
+ if (conditionFn(observer, objectInfo)) {
+ anyRemoved = true;
+ from[i] = null;
+ to.push(observer);
+ }
+ }
+
+ if (anyRemoved)
+ RemoveNullElements(from);
+}
+
+function BeginPerformChange(objectInfo, type) {
+ objectInfo.performing[type] = (objectInfo.performing[type] || 0) + 1;
+ objectInfo.performingCount++;
+ RepartitionObservers(ObserverIsInactive,
+ objectInfo.changeObservers,
+ objectInfo.inactiveObservers,
+ objectInfo);
+}
+
+function EndPerformChange(objectInfo, type) {
+ objectInfo.performing[type]--;
+ objectInfo.performingCount--;
+ RepartitionObservers(ObserverIsActive,
+ objectInfo.inactiveObservers,
+ objectInfo.changeObservers,
+ objectInfo);
+}
+
+function EnsureObserverRemoved(objectInfo, callback) {
+ function remove(observerList) {
+ for (var i = 0; i < observerList.length; i++) {
+ if (observerList[i].callback === callback) {
+ observerList.splice(i, 1);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ if (!remove(objectInfo.changeObservers))
+ remove(objectInfo.inactiveObservers);
+}
+
+function AcceptArgIsValid(arg) {
+ if (IS_UNDEFINED(arg))
+ return true;
+
+ if (!IS_SPEC_OBJECT(arg) ||
+ !IS_NUMBER(arg.length) ||
+ arg.length < 0)
+ return false;
+
+ var length = arg.length;
+ for (var i = 0; i < length; i++) {
+ if (!IS_STRING(arg[i]))
+ return false;
+ }
+ return true;
+}
+
+function ObjectObserve(object, callback, accept) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["observe"]);
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["observe"]);
if (ObjectIsFrozen(callback))
throw MakeTypeError("observe_callback_frozen");
+ if (!AcceptArgIsValid(accept))
+ throw MakeTypeError("observe_accept_invalid");
if (!observerInfoMap.has(callback)) {
observerInfoMap.set(callback, {
@@ -90,8 +219,13 @@ function ObjectObserve(object, callback) {
if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
%SetIsObserved(object, true);
- var changeObservers = objectInfo.changeObservers;
- if (changeObservers.indexOf(callback) < 0) changeObservers.push(callback);
+ EnsureObserverRemoved(objectInfo, callback);
+
+ var observer = CreateObserver(callback, accept);
+ if (ObserverIsActive(observer, objectInfo))
+ objectInfo.changeObservers.push(observer);
+ else
+ objectInfo.inactiveObservers.push(observer);
return object;
}
@@ -106,24 +240,39 @@ function ObjectUnobserve(object, callback) {
if (IS_UNDEFINED(objectInfo))
return object;
- var changeObservers = objectInfo.changeObservers;
- var index = changeObservers.indexOf(callback);
- if (index >= 0) {
- changeObservers.splice(index, 1);
- if (changeObservers.length === 0) %SetIsObserved(object, false);
+ EnsureObserverRemoved(objectInfo, callback);
+
+ if (objectInfo.changeObservers.length === 0 &&
+ objectInfo.inactiveObservers.length === 0) {
+ %SetIsObserved(object, false);
}
return object;
}
+function ArrayObserve(object, callback) {
+ return ObjectObserve(object, callback, ['new',
+ 'updated',
+ 'deleted',
+ 'splice']);
+}
+
+function ArrayUnobserve(object, callback) {
+ return ObjectUnobserve(object, callback);
+}
+
function EnqueueChangeRecord(changeRecord, observers) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(changeRecord.name)) return;
for (var i = 0; i < observers.length; i++) {
var observer = observers[i];
- var observerInfo = observerInfoMap.get(observer);
- observationState.pendingObservers[observerInfo.priority] = observer;
+ if (IS_UNDEFINED(observer.accept[changeRecord.type]))
+ continue;
+
+ var callback = observer.callback;
+ var observerInfo = observerInfoMap.get(callback);
+ observationState.pendingObservers[observerInfo.priority] = callback;
%SetObserverDeliveryPending();
if (IS_NULL(observerInfo.pendingChangeRecords)) {
observerInfo.pendingChangeRecords = new InternalArray(changeRecord);
@@ -133,8 +282,44 @@ function EnqueueChangeRecord(changeRecord, observers) {
}
}
+function BeginPerformSplice(array) {
+ var objectInfo = objectInfoMap.get(array);
+ if (!IS_UNDEFINED(objectInfo))
+ BeginPerformChange(objectInfo, 'splice');
+}
+
+function EndPerformSplice(array) {
+ var objectInfo = objectInfoMap.get(array);
+ if (!IS_UNDEFINED(objectInfo))
+ EndPerformChange(objectInfo, 'splice');
+}
+
+function EnqueueSpliceRecord(array, index, removed, deleteCount, addedCount) {
+ var objectInfo = objectInfoMap.get(array);
+ if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
+ return;
+
+ var changeRecord = {
+ type: 'splice',
+ object: array,
+ index: index,
+ removed: removed,
+ addedCount: addedCount
+ };
+
+ changeRecord.removed.length = deleteCount;
+ // TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
+ // slow.
+ // ObjectFreeze(changeRecord);
+ // ObjectFreeze(changeRecord.removed);
+ EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
+}
+
function NotifyChange(type, object, name, oldValue) {
var objectInfo = objectInfoMap.get(object);
+ if (objectInfo.changeObservers.length === 0)
+ return;
+
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
@@ -173,6 +358,36 @@ function ObjectNotifierNotify(changeRecord) {
EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
}
+function ObjectNotifierPerformChange(changeType, changeFn, receiver) {
+ if (!IS_SPEC_OBJECT(this))
+ throw MakeTypeError("called_on_non_object", ["performChange"]);
+
+ var target = notifierTargetMap.get(this);
+ if (IS_UNDEFINED(target))
+ throw MakeTypeError("observe_notify_non_notifier");
+ if (!IS_STRING(changeType))
+ throw MakeTypeError("observe_perform_non_string");
+ if (!IS_SPEC_FUNCTION(changeFn))
+ throw MakeTypeError("observe_perform_non_function");
+
+ if (IS_NULL_OR_UNDEFINED(receiver)) {
+ receiver = %GetDefaultReceiver(changeFn) || receiver;
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(changeFn)) {
+ receiver = ToObject(receiver);
+ }
+
+ var objectInfo = objectInfoMap.get(target);
+ if (IS_UNDEFINED(objectInfo))
+ return;
+
+ BeginPerformChange(objectInfo, changeType);
+ try {
+ %_CallFunction(receiver, changeFn);
+ } finally {
+ EndPerformChange(objectInfo, changeType);
+ }
+}
+
function ObjectGetNotifier(object) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["getNotifier"]);
@@ -234,8 +449,13 @@ function SetupObjectObserve() {
"observe", ObjectObserve,
"unobserve", ObjectUnobserve
));
+ InstallFunctions($Array, DONT_ENUM, $Array(
+ "observe", ArrayObserve,
+ "unobserve", ArrayUnobserve
+ ));
InstallFunctions(notifierPrototype, DONT_ENUM, $Array(
- "notify", ObjectNotifierNotify
+ "notify", ObjectNotifierNotify,
+ "performChange", ObjectNotifierPerformChange
));
}
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index ecbf9d6420..891f0d2302 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -306,6 +306,17 @@ void JSObject::JSObjectVerify() {
CHECK_EQ(map()->unused_property_fields(),
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
+ DescriptorArray* descriptors = map()->instance_descriptors();
+ for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
+ if (descriptors->GetDetails(i).type() == FIELD) {
+ Representation r = descriptors->GetDetails(i).representation();
+ int field = descriptors->GetFieldIndex(i);
+ Object* value = RawFastPropertyAt(field);
+ if (r.IsSmi()) ASSERT(value->IsSmi());
+ if (r.IsDouble()) ASSERT(value->IsHeapNumber());
+ if (r.IsHeapObject()) ASSERT(value->IsHeapObject());
+ }
+ }
}
CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
(elements() == GetHeap()->empty_fixed_array())),
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 06a13df5a3..95a0eca6a8 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -1030,10 +1030,7 @@ int Smi::value() {
Smi* Smi::FromInt(int value) {
ASSERT(Smi::IsValid(value));
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- intptr_t tagged_value =
- (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
- return reinterpret_cast<Smi*>(tagged_value);
+ return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
}
@@ -1111,28 +1108,8 @@ Failure* Failure::Construct(Type type, intptr_t value) {
bool Smi::IsValid(intptr_t value) {
-#ifdef DEBUG
- bool in_range = (value >= kMinValue) && (value <= kMaxValue);
-#endif
-
-#ifdef V8_TARGET_ARCH_X64
- // To be representable as a long smi, the value must be a 32-bit integer.
- bool result = (value == static_cast<int32_t>(value));
-#else
- // To be representable as an tagged small integer, the two
- // most-significant bits of 'value' must be either 00 or 11 due to
- // sign-extension. To check this we add 01 to the two
- // most-significant bits, and check if the most-significant bit is 0
- //
- // CAUTION: The original code below:
- // bool result = ((value + 0x40000000) & 0x80000000) == 0;
- // may lead to incorrect results according to the C language spec, and
- // in fact doesn't work correctly with gcc4.1.1 in some cases: The
- // compiler may produce undefined results in case of signed integer
- // overflow. The computation must be done w/ unsigned ints.
- bool result = (static_cast<uintptr_t>(value + 0x40000000U) < 0x80000000U);
-#endif
- ASSERT(result == in_range);
+ bool result = Internals::IsValidSmi(value);
+ ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue);
return result;
}
@@ -1484,10 +1461,17 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() {
- ASSERT(map()->has_fast_smi_or_object_elements() ||
- map()->has_fast_double_elements());
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
+ if (map()->has_fast_smi_or_object_elements() ||
+ map()->has_fast_double_elements()) {
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
+ } else if (map()->has_external_array_elements()) {
+ ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(map());
+ ASSERT(!GetHeap()->InNewSpace(empty_array));
+ WRITE_FIELD(this, kElementsOffset, empty_array);
+ } else {
+ UNREACHABLE();
+ }
}
@@ -1521,9 +1505,19 @@ MaybeObject* JSObject::ResetElements() {
MaybeObject* JSObject::AllocateStorageForMap(Map* map) {
ASSERT(this->map()->inobject_properties() == map->inobject_properties());
- ElementsKind expected_kind = this->map()->elements_kind();
- if (map->elements_kind() != expected_kind) {
- MaybeObject* maybe_map = map->AsElementsKind(expected_kind);
+ ElementsKind obj_kind = this->map()->elements_kind();
+ ElementsKind map_kind = map->elements_kind();
+ if (map_kind != obj_kind) {
+ ElementsKind to_kind = map_kind;
+ if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
+ IsDictionaryElementsKind(obj_kind)) {
+ to_kind = obj_kind;
+ }
+ MaybeObject* maybe_obj =
+ IsDictionaryElementsKind(to_kind) ? NormalizeElements()
+ : TransitionElementsKind(to_kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
+ MaybeObject* maybe_map = map->AsElementsKind(to_kind);
if (!maybe_map->To(&map)) return maybe_map;
}
int total_size =
@@ -3608,17 +3602,15 @@ bool Map::CanBeDeprecated() {
if (FLAG_track_double_fields && details.representation().IsDouble()) {
return true;
}
+ if (FLAG_track_heap_object_fields &&
+ details.representation().IsHeapObject()) {
+ return true;
+ }
}
return false;
}
-Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
- if (!map->is_deprecated()) return map;
- return GeneralizeRepresentation(map, 0, Representation::Smi());
-}
-
-
void Map::NotifyLeafMapLayoutChange() {
dependent_code()->DeoptimizeDependentCodeGroup(
GetIsolate(),
@@ -4668,6 +4660,7 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
kDontOptimize)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
void SharedFunctionInfo::BeforeVisitingPointers() {
@@ -4675,11 +4668,6 @@ void SharedFunctionInfo::BeforeVisitingPointers() {
}
-void SharedFunctionInfo::ClearOptimizedCodeMap() {
- set_optimized_code_map(Smi::FromInt(0));
-}
-
-
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 0849a630c2..ddecae3bf5 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -596,8 +596,8 @@ void Map::MapPrint(FILE* out) {
}
PrintF(out, " - back pointer: ");
GetBackPointer()->ShortPrint(out);
- PrintF(out, "\n - instance descriptors %i #%i: ",
- owns_descriptors(),
+ PrintF(out, "\n - instance descriptors %s#%i: ",
+ owns_descriptors() ? "(own) " : "",
NumberOfOwnDescriptors());
instance_descriptors()->ShortPrint(out);
if (HasTransitionArray()) {
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index add247ea45..f83f00fd5d 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -311,15 +311,23 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
- if (FLAG_cache_optimized_code) {
- // Flush optimized code map on major GC.
- // TODO(mstarzinger): We may experiment with rebuilding it or with
- // retaining entries which should survive as we iterate through
- // optimized functions anyway.
+ if (FLAG_cache_optimized_code &&
+ FLAG_flush_optimized_code_cache &&
+ !shared->optimized_code_map()->IsSmi()) {
+ // Always flush the optimized code map if requested by flag.
shared->ClearOptimizedCodeMap();
}
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
+ if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+ // Add the shared function info holding an optimized code map to
+ // the code flusher for processing of code maps after marking.
+ collector->code_flusher()->AddOptimizedCodeMap(shared);
+ // Treat all references within the code map weakly by marking the
+ // code map itself but not pushing it onto the marking deque.
+ FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+ StaticVisitor::MarkObjectWithoutPush(heap, code_map);
+ }
if (IsFlushable(heap, shared)) {
// This function's code looks flushable. But we have to postpone
// the decision until we see all functions that point to the same
@@ -332,6 +340,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
VisitSharedFunctionInfoWeakCode(heap, object);
return;
}
+ } else {
+ if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+ // Flush optimized code map on major GCs without code flushing,
+ // needed because cached code doesn't contain breakpoints.
+ shared->ClearOptimizedCodeMap();
+ }
}
VisitSharedFunctionInfoStrongCode(heap, object);
}
@@ -566,14 +580,20 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
- // If this is a full script wrapped in a function we do no flush the code.
+ // We do not (yet?) flush code for generator functions, because we don't know
+ // if there are still live activations (generator objects) on the heap.
+ if (shared_info->is_generator()) {
+ return false;
+ }
+
+ // If this is a full script wrapped in a function we do not flush the code.
if (shared_info->is_toplevel()) {
return false;
}
- // If this is a native function we do not flush the code because %SetCode
- // breaks the one-to-one relation between SharedFunctionInfo and Code.
- if (shared_info->native()) {
+ // If this is a function initialized with %SetCode then the one-to-one
+ // relation between SharedFunctionInfo and Code is broken.
+ if (shared_info->dont_flush()) {
return false;
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index d127d1bb8a..31bbbdbeee 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -339,13 +339,12 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
JSObject* self = JSObject::cast(receiver);
Handle<String> key(String::cast(name));
LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- CustomArguments args(isolate, data->data(), self, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments args(isolate, data->data(), self, this);
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = call_fun(v8::Utils::ToLocal(key), info);
+ result = args.Call(call_fun, v8::Utils::ToLocal(key));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) {
@@ -1291,20 +1290,23 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
break;
}
case JS_FUNCTION_TYPE: {
- Object* fun_name = JSFunction::cast(this)->shared()->name();
+ JSFunction* function = JSFunction::cast(this);
+ Object* fun_name = function->shared()->DebugName();
bool printed = false;
if (fun_name->IsString()) {
String* str = String::cast(fun_name);
if (str->length() > 0) {
accumulator->Add("<JS Function ");
accumulator->Put(str);
- accumulator->Put('>');
printed = true;
}
}
if (!printed) {
- accumulator->Add("<JS Function>");
+ accumulator->Add("<JS Function");
}
+ accumulator->Add(" (SharedFunctionInfo %p)",
+ reinterpret_cast<void*>(function->shared()));
+ accumulator->Put('>');
break;
}
case JS_GENERATOR_OBJECT_TYPE: {
@@ -1341,6 +1343,9 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
global_object ? "Global Object: " : "",
vowel ? "n" : "");
accumulator->Put(str);
+ accumulator->Add(" with %smap 0x%p",
+ map_of_this->is_deprecated() ? "deprecated " : "",
+ map_of_this);
printed = true;
}
}
@@ -1456,9 +1461,17 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<ExternalDoubleArray[%u]>",
ExternalDoubleArray::cast(this)->length());
break;
- case SHARED_FUNCTION_INFO_TYPE:
- accumulator->Add("<SharedFunctionInfo>");
+ case SHARED_FUNCTION_INFO_TYPE: {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(this);
+ SmartArrayPointer<char> debug_name =
+ shared->DebugName()->ToCString();
+ if (debug_name[0] != 0) {
+ accumulator->Add("<SharedFunctionInfo %s>", *debug_name);
+ } else {
+ accumulator->Add("<SharedFunctionInfo>");
+ }
break;
+ }
case JS_MESSAGE_OBJECT_TYPE:
accumulator->Add("<JSMessageObject>");
break;
@@ -1788,7 +1801,9 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- Representation representation = value->OptimalRepresentation();
+ Representation representation = IsJSContextExtensionObject()
+ ? Representation::Tagged() : value->OptimalRepresentation();
+
FieldDescriptor new_field(name, index, attributes, representation);
ASSERT(index < map()->inobject_properties() ||
@@ -2041,8 +2056,8 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition(
Map* old_target = old_map->GetTransition(transition_index);
Object* result;
- MaybeObject* maybe_result =
- ConvertDescriptorToField(name, new_value, attributes);
+ MaybeObject* maybe_result = ConvertDescriptorToField(
+ name, new_value, attributes, OMIT_TRANSITION_KEEP_REPRESENTATIONS);
if (!maybe_result->To(&result)) return maybe_result;
if (!HasFastProperties()) return result;
@@ -2083,7 +2098,8 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition(
MaybeObject* JSObject::ConvertDescriptorToField(Name* name,
Object* new_value,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
if (map()->unused_property_fields() == 0 &&
TooManyFastProperties(properties()->length(), MAY_BE_STORE_FROM_KEYED)) {
Object* obj;
@@ -2092,14 +2108,14 @@ MaybeObject* JSObject::ConvertDescriptorToField(Name* name,
return ReplaceSlowProperty(name, new_value, attributes);
}
- Representation representation = new_value->OptimalRepresentation();
+ Representation representation = IsJSContextExtensionObject()
+ ? Representation::Tagged() : new_value->OptimalRepresentation();
int index = map()->NextFreePropertyIndex();
FieldDescriptor new_field(name, index, attributes, representation);
// Make a new map for the object.
Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyInsertDescriptor(&new_field,
- OMIT_TRANSITION);
+ MaybeObject* maybe_new_map = map()->CopyInsertDescriptor(&new_field, flag);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
// Make new properties array if necessary.
@@ -2137,6 +2153,7 @@ const char* Representation::Mnemonic() const {
case kSmi: return "s";
case kDouble: return "d";
case kInteger32: return "i";
+ case kHeapObject: return "h";
case kExternal: return "x";
default:
UNREACHABLE();
@@ -2171,7 +2188,7 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
- ZapEndOfFixedArray(new_end, to_trim);
+ ZapEndOfFixedArray(new_end, to_trim);
}
int size_delta = to_trim * kPointerSize;
@@ -2422,6 +2439,7 @@ Map* Map::FindRootMap() {
}
+// Returns NULL if the updated map is incompatible.
Map* Map::FindUpdatedMap(int verbatim,
int length,
DescriptorArray* descriptors) {
@@ -2437,6 +2455,17 @@ Map* Map::FindUpdatedMap(int verbatim,
int transition = transitions->Search(name);
if (transition == TransitionArray::kNotFound) break;
current = transitions->GetTarget(transition);
+ PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails target_details =
+ current->instance_descriptors()->GetDetails(i);
+ if (details.attributes() != target_details.attributes()) return NULL;
+ if (details.type() == CALLBACKS) {
+ if (target_details.type() != CALLBACKS) return NULL;
+ if (descriptors->GetValue(i) !=
+ current->instance_descriptors()->GetValue(i)) {
+ return NULL;
+ }
+ }
}
return current;
@@ -2499,10 +2528,10 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
Representation new_representation) {
Map* old_map = this;
DescriptorArray* old_descriptors = old_map->instance_descriptors();
- Representation old_reprepresentation =
+ Representation old_representation =
old_descriptors->GetDetails(modify_index).representation();
- if (old_reprepresentation.IsNone()) {
+ if (old_representation.IsNone()) {
UNREACHABLE();
old_descriptors->SetRepresentation(modify_index, new_representation);
return this;
@@ -2511,6 +2540,7 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
int descriptors = old_map->NumberOfOwnDescriptors();
Map* root_map = old_map->FindRootMap();
+ // Check the state of the root map.
if (!old_map->EquivalentToForTransition(root_map)) {
return CopyGeneralizeAllRepresentations();
}
@@ -2519,7 +2549,8 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
Map* updated = root_map->FindUpdatedMap(
verbatim, descriptors, old_descriptors);
- // Check the state of the root map.
+ if (updated == NULL) return CopyGeneralizeAllRepresentations();
+
DescriptorArray* updated_descriptors = updated->instance_descriptors();
int valid = updated->NumberOfOwnDescriptors();
@@ -2528,9 +2559,14 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
Representation updated_representation =
updated_descriptors->GetDetails(modify_index).representation();
if (new_representation.fits_into(updated_representation)) {
- if (FLAG_trace_generalization) {
- PrintF("migrating to existing map %p -> %p\n",
- static_cast<void*>(this), static_cast<void*>(updated));
+ if (FLAG_trace_generalization &&
+ !(modify_index == 0 && new_representation.IsSmi())) {
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ PrintF("migrating to existing map %p(%s) -> %p(%s)\n",
+ static_cast<void*>(this),
+ old_details.representation().Mnemonic(),
+ static_cast<void*>(updated),
+ updated_representation.Mnemonic());
}
return updated;
}
@@ -2541,10 +2577,13 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
verbatim, valid, descriptors, old_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
- old_reprepresentation =
+ old_representation =
new_descriptors->GetDetails(modify_index).representation();
- new_representation = new_representation.generalize(old_reprepresentation);
- new_descriptors->SetRepresentation(modify_index, new_representation);
+ Representation updated_representation =
+ new_representation.generalize(old_representation);
+ if (!updated_representation.Equals(old_representation)) {
+ new_descriptors->SetRepresentation(modify_index, updated_representation);
+ }
Map* split_map = root_map->FindLastMatchMap(
verbatim, descriptors, new_descriptors);
@@ -2558,10 +2597,14 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
split_map->DeprecateTarget(
old_descriptors->GetKey(descriptor), new_descriptors);
- if (FLAG_trace_generalization) {
- PrintF("migrating to new map %p -> %p (%i steps)\n",
+ if (FLAG_trace_generalization &&
+ !(modify_index == 0 && new_representation.IsSmi())) {
+ PrintF("migrating to new map %i: %p(%s) -> %p(%s) (%i steps)\n",
+ modify_index,
static_cast<void*>(this),
+ old_representation.Mnemonic(),
static_cast<void*>(new_descriptors),
+ updated_representation.Mnemonic(),
descriptors - descriptor);
}
@@ -2575,6 +2618,7 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
// during GC. Its descriptor array is too large, but it will be
// overwritten during retry anyway.
Handle<Map>(new_map);
+ return maybe_map;
}
}
@@ -2583,6 +2627,34 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
}
+Map* Map::CurrentMapForDeprecated() {
+ AssertNoAllocation no_allocation;
+ if (!is_deprecated()) return this;
+
+ DescriptorArray* old_descriptors = instance_descriptors();
+
+ int descriptors = NumberOfOwnDescriptors();
+ Map* root_map = FindRootMap();
+
+ // Check the state of the root map.
+ if (!EquivalentToForTransition(root_map)) return NULL;
+ int verbatim = root_map->NumberOfOwnDescriptors();
+
+ Map* updated = root_map->FindUpdatedMap(
+ verbatim, descriptors, old_descriptors);
+ if (updated == NULL) return NULL;
+
+ DescriptorArray* updated_descriptors = updated->instance_descriptors();
+ int valid = updated->NumberOfOwnDescriptors();
+ if (!updated_descriptors->IsMoreGeneralThan(
+ verbatim, valid, descriptors, old_descriptors)) {
+ return NULL;
+ }
+
+ return updated;
+}
+
+
MaybeObject* JSObject::SetPropertyWithInterceptor(
Name* name,
Object* value,
@@ -2598,8 +2670,7 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
v8::NamedPropertySetter setter =
v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
v8::Handle<v8::Value> result;
@@ -2610,9 +2681,9 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
isolate->heap()->undefined_value() :
value,
isolate);
- result = setter(v8::Utils::ToLocal(name_handle),
- v8::Utils::ToLocal(value_unhole),
- info);
+ result = args.Call(setter,
+ v8::Utils::ToLocal(name_handle),
+ v8::Utils::ToLocal(value_unhole));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
@@ -2712,14 +2783,14 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (call_fun == NULL) return value;
Handle<String> key(String::cast(name));
LOG(isolate, ApiNamedPropertyAccess("store", this, name));
- CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, data->data(), this, JSObject::cast(holder));
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- call_fun(v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle),
- info);
+ args.Call(call_fun,
+ v8::Utils::ToLocal(key),
+ v8::Utils::ToLocal(value_handle));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
@@ -3006,6 +3077,20 @@ Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
}
+bool Map::IsMapInArrayPrototypeChain() {
+ Isolate* isolate = GetIsolate();
+ if (isolate->initial_array_prototype()->map() == this) {
+ return true;
+ }
+
+ if (isolate->initial_object_prototype()->map() == this) {
+ return true;
+ }
+
+ return false;
+}
+
+
static MaybeObject* AddMissingElementsTransitions(Map* map,
ElementsKind to_kind) {
ASSERT(IsFastElementsKind(map->elements_kind()));
@@ -4007,8 +4092,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(String::cast(name));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQuery query =
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
@@ -4018,7 +4102,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = query(v8::Utils::ToLocal(name_handle), info);
+ result = args.Call(query, v8::Utils::ToLocal(name_handle));
}
if (!result.IsEmpty()) {
ASSERT(result->IsInt32());
@@ -4033,7 +4117,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = getter(v8::Utils::ToLocal(name_handle), info);
+ result = args.Call(getter, v8::Utils::ToLocal(name_handle));
}
if (!result.IsEmpty()) return DONT_ENUM;
}
@@ -4148,8 +4232,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSReceiver> hreceiver(receiver);
Handle<JSObject> holder(this);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQuery query =
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
@@ -4159,7 +4242,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = query(index, info);
+ result = args.Call(query, index);
}
if (!result.IsEmpty())
return static_cast<PropertyAttributes>(result->Int32Value());
@@ -4172,7 +4255,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = getter(index, info);
+ result = args.Call(getter, index);
}
if (!result.IsEmpty()) return NONE;
}
@@ -4836,13 +4919,12 @@ MaybeObject* JSObject::DeletePropertyWithInterceptor(Name* name) {
v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = deleter(v8::Utils::ToLocal(name_handle), info);
+ result = args.Call(deleter, v8::Utils::ToLocal(name_handle));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
@@ -4873,13 +4955,12 @@ MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
Handle<JSObject> this_handle(this);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = deleter(index, info);
+ result = args.Call(deleter, index);
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
@@ -6308,7 +6389,7 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
set_transitions(transitions);
result->SetBackPointer(this);
- } else {
+ } else if (flag != OMIT_TRANSITION_KEEP_REPRESENTATIONS) {
descriptors->InitializeRepresentations(Representation::Tagged());
}
@@ -6316,6 +6397,8 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
}
+// Since this method is used to rewrite an existing transition tree, it can
+// always insert transitions without checking.
MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
DescriptorArray* descriptors) {
ASSERT(descriptors->IsSortedNoDuplicates());
@@ -6338,18 +6421,14 @@ MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
result->set_unused_property_fields(unused_property_fields);
result->set_owns_descriptors(false);
- if (CanHaveMoreTransitions()) {
- Name* name = descriptors->GetKey(new_descriptor);
- TransitionArray* transitions;
- MaybeObject* maybe_transitions =
- AddTransition(name, result, SIMPLE_TRANSITION);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ Name* name = descriptors->GetKey(new_descriptor);
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions =
+ AddTransition(name, result, SIMPLE_TRANSITION);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
- set_transitions(transitions);
- result->SetBackPointer(this);
- } else {
- descriptors->InitializeRepresentations(Representation::Tagged());
- }
+ set_transitions(transitions);
+ result->SetBackPointer(this);
return result;
}
@@ -7467,19 +7546,13 @@ bool DescriptorArray::IsMoreGeneralThan(int verbatim,
for (int descriptor = verbatim; descriptor < valid; descriptor++) {
PropertyDetails details = GetDetails(descriptor);
PropertyDetails other_details = other->GetDetails(descriptor);
- if (details.type() != other_details.type()) {
- if (details.type() != FIELD ||
- other_details.type() != CONSTANT_FUNCTION) {
- return false;
- }
- } else if (details.type() == CONSTANT_FUNCTION) {
- if (GetValue(descriptor) != other->GetValue(descriptor)) {
- return false;
- }
- } else if (!other_details.representation().fits_into(
- details.representation())) {
+ if (!other_details.representation().fits_into(details.representation())) {
return false;
}
+ if (details.type() == CONSTANT_FUNCTION) {
+ if (other_details.type() != CONSTANT_FUNCTION) return false;
+ if (GetValue(descriptor) != other->GetValue(descriptor)) return false;
+ }
}
return true;
@@ -8343,12 +8416,13 @@ bool String::MarkAsUndetectable() {
}
-bool String::IsUtf8EqualTo(Vector<const char> str) {
+bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
int slen = length();
// Can't check exact length equality, but we can check bounds.
int str_len = str.length();
- if (str_len < slen ||
- str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize)) {
+ if (!allow_prefix_match &&
+ (str_len < slen ||
+ str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize))) {
return false;
}
int i;
@@ -8368,7 +8442,7 @@ bool String::IsUtf8EqualTo(Vector<const char> str) {
utf8_data += cursor;
remaining_in_str -= cursor;
}
- return i == slen && remaining_in_str == 0;
+ return (allow_prefix_match || i == slen) && remaining_in_str == 0;
}
@@ -8933,33 +9007,46 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<Context> native_context,
Handle<Code> code,
Handle<FixedArray> literals) {
+ CALL_HEAP_FUNCTION_VOID(
+ shared->GetIsolate(),
+ shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
+}
+
+
+MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
+ Code* code,
+ FixedArray* literals) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(native_context->IsNativeContext());
STATIC_ASSERT(kEntryLength == 3);
- Object* value = shared->optimized_code_map();
- Handle<FixedArray> new_code_map;
+ Heap* heap = GetHeap();
+ FixedArray* new_code_map;
+ Object* value = optimized_code_map();
if (value->IsSmi()) {
// No optimized code map.
ASSERT_EQ(0, Smi::cast(value)->value());
// Crate 3 entries per context {context, code, literals}.
- new_code_map = FACTORY->NewFixedArray(kEntryLength);
- new_code_map->set(0, *native_context);
- new_code_map->set(1, *code);
- new_code_map->set(2, *literals);
+ MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
+ if (!maybe->To(&new_code_map)) return maybe;
+ new_code_map->set(kEntriesStart + 0, native_context);
+ new_code_map->set(kEntriesStart + 1, code);
+ new_code_map->set(kEntriesStart + 2, literals);
} else {
// Copy old map and append one new entry.
- Handle<FixedArray> old_code_map(FixedArray::cast(value));
- ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context));
+ FixedArray* old_code_map = FixedArray::cast(value);
+ ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
int old_length = old_code_map->length();
int new_length = old_length + kEntryLength;
- new_code_map = FACTORY->NewFixedArray(new_length);
- old_code_map->CopyTo(0, *new_code_map, 0, old_length);
- new_code_map->set(old_length, *native_context);
- new_code_map->set(old_length + 1, *code);
- new_code_map->set(old_length + 2, *literals);
+ MaybeObject* maybe = old_code_map->CopySize(new_length);
+ if (!maybe->To(&new_code_map)) return maybe;
+ new_code_map->set(old_length + 0, native_context);
+ new_code_map->set(old_length + 1, code);
+ new_code_map->set(old_length + 2, literals);
+ // Zap the old map for the sake of the heap verifier.
+ if (Heap::ShouldZapGarbage()) ZapOptimizedCodeMap();
}
#ifdef DEBUG
- for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+ for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
ASSERT(new_code_map->get(i)->IsNativeContext());
ASSERT(new_code_map->get(i + 1)->IsCode());
ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
@@ -8967,14 +9054,14 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
ASSERT(new_code_map->get(i + 2)->IsFixedArray());
}
#endif
- shared->set_optimized_code_map(*new_code_map);
+ set_optimized_code_map(new_code_map);
+ return new_code_map;
}
void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
int index) {
- ASSERT(index > 0);
- ASSERT(optimized_code_map()->IsFixedArray());
+ ASSERT(index > kEntriesStart);
FixedArray* code_map = FixedArray::cast(optimized_code_map());
if (!bound()) {
FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
@@ -8988,6 +9075,76 @@ void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
}
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+
+ // If the next map link slot is already used then the function was
+ // enqueued with code flushing and we remove it now.
+ if (!code_map->get(kNextMapIndex)->IsUndefined()) {
+ CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+ flusher->EvictOptimizedCodeMap(this);
+ }
+
+ ASSERT(code_map->get(kNextMapIndex)->IsUndefined());
+ set_optimized_code_map(Smi::FromInt(0));
+}
+
+
+void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
+ const char* reason) {
+ if (optimized_code_map()->IsSmi()) return;
+
+ int i;
+ bool removed_entry = false;
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ for (i = kEntriesStart; i < code_map->length(); i += kEntryLength) {
+ ASSERT(code_map->get(i)->IsNativeContext());
+ if (Code::cast(code_map->get(i + 1)) == optimized_code) {
+ if (FLAG_trace_opt) {
+ PrintF("[evicting entry from optimizing code map (%s) for ", reason);
+ ShortPrint();
+ PrintF("]\n");
+ }
+ removed_entry = true;
+ break;
+ }
+ }
+ while (i < (code_map->length() - kEntryLength)) {
+ code_map->set(i, code_map->get(i + kEntryLength));
+ code_map->set(i + 1, code_map->get(i + 1 + kEntryLength));
+ code_map->set(i + 2, code_map->get(i + 2 + kEntryLength));
+ i += kEntryLength;
+ }
+ if (removed_entry) {
+ // Always trim even when array is cleared because of heap verifier.
+ RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
+ if (code_map->length() == kEntriesStart) {
+ ClearOptimizedCodeMap();
+ }
+ }
+}
+
+
+void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ ASSERT(shrink_by % kEntryLength == 0);
+ ASSERT(shrink_by <= code_map->length() - kEntriesStart);
+ // Always trim even when array is cleared because of heap verifier.
+ RightTrimFixedArray<FROM_GC>(GetHeap(), code_map, shrink_by);
+ if (code_map->length() == kEntriesStart) {
+ ClearOptimizedCodeMap();
+ }
+}
+
+
+void SharedFunctionInfo::ZapOptimizedCodeMap() {
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ MemsetPointer(code_map->data_start(),
+ GetHeap()->the_hole_value(),
+ code_map->length());
+}
+
+
bool JSFunction::CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
@@ -9201,6 +9358,26 @@ Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
}
+bool JSFunction::PassesHydrogenFilter() {
+ String* name = shared()->DebugName();
+ if (*FLAG_hydrogen_filter != '\0') {
+ Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+ if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
+ if (filter[0] == '-' &&
+ !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return true;
+ }
+ if (filter[filter.length() - 1] == '*' &&
+ name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
+ return true;
+ }
+ return false;
+ }
+
+ return true;
+}
+
+
MaybeObject* Oddball::Initialize(const char* to_string,
Object* to_number,
byte kind) {
@@ -9455,8 +9632,9 @@ void SharedFunctionInfo::DisableOptimization(const char* reason) {
code()->set_optimizable(false);
}
if (FLAG_trace_opt) {
- PrintF("[disabled optimization for %s, reason: %s]\n",
- *DebugName()->ToCString(), reason);
+ PrintF("[disabled optimization for ");
+ ShortPrint();
+ PrintF(", reason: %s]\n", reason);
}
}
@@ -9606,11 +9784,16 @@ int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
if (!value->IsSmi()) {
FixedArray* optimized_code_map = FixedArray::cast(value);
int length = optimized_code_map->length();
- for (int i = 0; i < length; i += 3) {
+ for (int i = kEntriesStart; i < length; i += kEntryLength) {
if (optimized_code_map->get(i) == native_context) {
return i + 1;
}
}
+ if (FLAG_trace_opt) {
+ PrintF("[didn't find optimized code in optimized code map for ");
+ ShortPrint();
+ PrintF("]\n");
+ }
}
return -1;
}
@@ -9692,9 +9875,10 @@ void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
VisitExternalReferences(p, p + 1);
}
-byte Code::compare_nil_state() {
+byte Code::compare_nil_types() {
ASSERT(is_compare_nil_ic_stub());
- return CompareNilICStub::TypesFromExtraICState(extended_extra_ic_state());
+ return CompareNilICStub::ExtractTypesFromExtraICState(
+ extended_extra_ic_state());
}
@@ -10031,12 +10215,15 @@ void Code::PrintDeoptLocation(int bailout_id) {
RelocInfo* info = it.rinfo();
if (info->rmode() == RelocInfo::COMMENT) {
last_comment = reinterpret_cast<const char*>(info->data());
- } else if (last_comment != NULL &&
- bailout_id == Deoptimizer::GetDeoptimizationId(
- GetIsolate(), info->target_address(), Deoptimizer::EAGER)) {
- CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
- PrintF(" %s\n", last_comment);
- return;
+ } else if (last_comment != NULL) {
+ if ((bailout_id == Deoptimizer::GetDeoptimizationId(
+ GetIsolate(), info->target_address(), Deoptimizer::EAGER)) ||
+ (bailout_id == Deoptimizer::GetDeoptimizationId(
+ GetIsolate(), info->target_address(), Deoptimizer::SOFT))) {
+ CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
+ PrintF(" %s\n", last_comment);
+ return;
+ }
}
}
}
@@ -10968,13 +11155,12 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- CustomArguments args(isolate, interceptor->data(), this, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = setter(index, v8::Utils::ToLocal(value_handle), info);
+ result = args.Call(setter, index, v8::Utils::ToLocal(value_handle));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
@@ -11011,13 +11197,13 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key = isolate->factory()->NumberToString(number);
LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
- CustomArguments args(isolate, data->data(), *self, *holder_handle);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, data->data(), *self, *holder_handle);
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = call_fun(v8::Utils::ToLocal(key), info);
+ result = args.Call(call_fun, v8::Utils::ToLocal(key));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) return isolate->heap()->undefined_value();
@@ -11078,14 +11264,14 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
- CustomArguments args(isolate, data->data(), *self, *holder_handle);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, data->data(), *self, *holder_handle);
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- call_fun(v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle),
- info);
+ args.Call(call_fun,
+ v8::Utils::ToLocal(key),
+ v8::Utils::ToLocal(value_handle));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
@@ -11151,6 +11337,18 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
ASSERT(HasFastSmiOrObjectElements() ||
HasFastArgumentsElements());
+ // Array optimizations rely on the prototype lookups of Array objects always
+ // returning undefined. If there is a store to the initial prototype object,
+ // make sure all of these optimizations are invalidated.
+ Isolate* isolate(GetIsolate());
+ if (isolate->is_initial_object_prototype(this) ||
+ isolate->is_initial_array_prototype(this)) {
+ HandleScope scope(GetIsolate());
+ map()->dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(),
+ DependentCode::kElementsCantBeAddedGroup);
+ }
+
FixedArray* backing_store = FixedArray::cast(elements());
if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
backing_store = FixedArray::cast(backing_store->get(1));
@@ -11957,13 +12155,13 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), receiver, this);
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = getter(index, info);
+ result = args.Call(getter, index);
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
@@ -12267,13 +12465,13 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
+ PropertyCallbackArguments
+ args(isolate, interceptor->data(), receiver, this);
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- result = getter(v8::Utils::ToLocal(name_handle), info);
+ result = args.Call(getter, v8::Utils::ToLocal(name_handle));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
@@ -13393,7 +13591,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
// Ordering is irrelevant, since we are going to sort anyway.
SeededNumberDictionary* dict = element_dictionary();
if (IsJSArray() || dict->requires_slow_elements() ||
- dict->max_number_key() >= limit) {
+ dict->max_number_key() >= limit || map()->is_observed()) {
return PrepareSlowElementsForSort(limit);
}
// Convert to fast elements.
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 1b4ed5b3b5..ac74162962 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -254,6 +254,7 @@ enum CreationFlag {
// Indicates whether transitions can be added to a source map or not.
enum TransitionFlag {
INSERT_TRANSITION,
+ OMIT_TRANSITION_KEEP_REPRESENTATIONS,
OMIT_TRANSITION
};
@@ -851,7 +852,7 @@ struct ValueInfo : public Malloced {
// A template-ized version of the IsXXX functions.
-template <class C> static inline bool Is(Object* obj);
+template <class C> inline bool Is(Object* obj);
#ifdef VERIFY_HEAP
#define DECLARE_VERIFIER(Name) void Name##Verify();
@@ -1066,6 +1067,13 @@ class Object : public MaybeObject {
return Representation::Smi();
} else if (FLAG_track_double_fields && IsHeapNumber()) {
return Representation::Double();
+ } else if (FLAG_track_heap_object_fields && !IsUndefined()) {
+ // Don't track undefined as heapobject because it's also used as temporary
+ // value for computed fields that may turn out to be Smi. That combination
+ // will go tagged, so go tagged immediately.
+ // TODO(verwaest): Change once we track computed boilerplate fields.
+ ASSERT(IsHeapObject());
+ return Representation::HeapObject();
} else {
return Representation::Tagged();
}
@@ -1076,6 +1084,8 @@ class Object : public MaybeObject {
return IsSmi();
} else if (FLAG_track_double_fields && representation.IsDouble()) {
return IsNumber();
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ return IsHeapObject();
}
return true;
}
@@ -2191,7 +2201,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* ConvertDescriptorToField(
Name* name,
Object* new_value,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ TransitionFlag flag = OMIT_TRANSITION);
MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
@@ -4611,7 +4622,7 @@ class Code: public HeapObject {
inline void set_to_boolean_state(byte value);
// [compare_nil]: For kind COMPARE_NIL_IC tells what state the stub is in.
- byte compare_nil_state();
+ byte compare_nil_types();
// [has_function_cache]: For kind STUB tells whether there is a function
// cache is passed to the stub.
@@ -4968,7 +4979,10 @@ class DependentCode: public FixedArray {
// described by this map changes shape (and transitions to a new map),
// possibly invalidating the assumptions embedded in the code.
kPrototypeCheckGroup,
- kGroupCount = kPrototypeCheckGroup + 1
+ // Group of code that depends on elements not being added to objects with
+ // this map.
+ kElementsCantBeAddedGroup,
+ kGroupCount = kElementsCantBeAddedGroup + 1
};
// Array for holding the index of the first code object of each group.
@@ -5365,9 +5379,8 @@ class Map: public HeapObject {
// Returns a non-deprecated version of the input. If the input was not
// deprecated, it is directly returned. Otherwise, the non-deprecated version
// is found by re-transitioning from the root of the transition tree using the
- // descriptor array of the map. New maps (and transitions) may be created if
- // no new (more general) version exists.
- static inline Handle<Map> CurrentMapForDeprecated(Handle<Map> map);
+ // descriptor array of the map. Returns NULL if no updated map is found.
+ Map* CurrentMapForDeprecated();
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
@@ -5500,6 +5513,8 @@ class Map: public HeapObject {
inline void AddDependentCode(DependentCode::DependencyGroup group,
Handle<Code> code);
+ bool IsMapInArrayPrototypeChain();
+
// Dispatched behavior.
DECLARE_PRINTER(Map)
DECLARE_VERIFIER(Map)
@@ -5793,7 +5808,7 @@ class SharedFunctionInfo: public HeapObject {
inline void ReplaceCode(Code* code);
// [optimized_code_map]: Map from native context to optimized code
- // and a shared literals array or Smi 0 if none.
+ // and a shared literals array or Smi(0) if none.
DECL_ACCESSORS(optimized_code_map, Object)
// Returns index i of the entry with the specified context. At position
@@ -5806,14 +5821,34 @@ class SharedFunctionInfo: public HeapObject {
void InstallFromOptimizedCodeMap(JSFunction* function, int index);
// Clear optimized code map.
- inline void ClearOptimizedCodeMap();
+ void ClearOptimizedCodeMap();
+
+ // Removed a specific optimized code object from the optimized code map.
+ void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
+
+ // Trims the optimized code map after entries have been removed.
+ void TrimOptimizedCodeMap(int shrink_by);
+
+ // Zaps the contents of backing optimized code map.
+ void ZapOptimizedCodeMap();
// Add a new entry to the optimized code map.
+ MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
+ Code* code,
+ FixedArray* literals);
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
Handle<FixedArray> literals);
+
+ // Layout description of the optimized code map.
+ static const int kNextMapIndex = 0;
+ static const int kEntriesStart = 1;
static const int kEntryLength = 3;
+ static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
+ static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
+ static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
+ static const int kInitialLength = kEntriesStart + kEntryLength;
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -6111,6 +6146,9 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that code for this function cannot be cached.
DECL_BOOLEAN_ACCESSORS(dont_cache)
+ // Indicates that code for this function cannot be flushed.
+ DECL_BOOLEAN_ACCESSORS(dont_flush)
+
// Indicates that this function is a generator.
DECL_BOOLEAN_ACCESSORS(is_generator)
@@ -6340,6 +6378,7 @@ class SharedFunctionInfo: public HeapObject {
kDontOptimize,
kDontInline,
kDontCache,
+ kDontFlush,
kIsGenerator,
kCompilerHintsCount // Pseudo entry
};
@@ -6660,6 +6699,8 @@ class JSFunction: public JSObject {
}
#endif
+ bool PassesHydrogenFilter();
+
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
static const int kCodeEntryOffset = JSObject::kHeaderSize;
@@ -7792,7 +7833,7 @@ class String: public Name {
// String equality operations.
inline bool Equals(String* other);
- bool IsUtf8EqualTo(Vector<const char> str);
+ bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
bool IsOneByteEqualTo(Vector<const uint8_t> str);
bool IsTwoByteEqualTo(Vector<const uc16> str);
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index cff51bc9c3..5eec342168 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -794,7 +794,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
}
-Handle<String> Parser::GetSymbol(bool* ok) {
+Handle<String> Parser::GetSymbol() {
int symbol_id = -1;
if (pre_parse_data() != NULL) {
symbol_id = pre_parse_data()->GetSymbolIdentifier();
@@ -1341,7 +1341,7 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// String
Expect(Token::STRING, CHECK_OK);
- Handle<String> symbol = GetSymbol(CHECK_OK);
+ Handle<String> symbol = GetSymbol();
// TODO(ES6): Request JS resource from environment...
@@ -3113,7 +3113,12 @@ Expression* Parser::ParseYieldExpression(bool* ok) {
Expression* generator_object = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
- return factory()->NewYield(generator_object, expression, kind, position);
+ Yield* yield =
+ factory()->NewYield(generator_object, expression, kind, position);
+ if (kind == Yield::DELEGATING) {
+ yield->set_index(current_function_state_->NextHandlerIndex());
+ }
+ return yield;
}
@@ -3687,7 +3692,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::STRING: {
Consume(Token::STRING);
- Handle<String> symbol = GetSymbol(CHECK_OK);
+ Handle<String> symbol = GetSymbol();
result = factory()->NewLiteral(symbol);
if (fni_ != NULL) fni_->PushLiteralName(symbol);
break;
@@ -4042,7 +4047,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
if (is_keyword) {
name = isolate_->factory()->InternalizeUtf8String(Token::String(next));
} else {
- name = GetSymbol(CHECK_OK);
+ name = GetSymbol();
}
FunctionLiteral* value =
ParseFunctionLiteral(name,
@@ -4123,7 +4128,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
case Token::STRING: {
Consume(Token::STRING);
- Handle<String> string = GetSymbol(CHECK_OK);
+ Handle<String> string = GetSymbol();
if (fni_ != NULL) fni_->PushLiteralName(string);
uint32_t index;
if (!string.is_null() && string->AsArrayIndex(&index)) {
@@ -4145,7 +4150,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
default:
if (Token::IsKeyword(next)) {
Consume(next);
- Handle<String> string = GetSymbol(CHECK_OK);
+ Handle<String> string = GetSymbol();
key = factory()->NewLiteral(string);
} else {
// Unexpected token.
@@ -4818,7 +4823,7 @@ void Parser::ExpectSemicolon(bool* ok) {
void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return;
- Handle<String> symbol = GetSymbol(ok);
+ Handle<String> symbol = GetSymbol();
if (!*ok) return;
if (!symbol->IsUtf8EqualTo(CStrVector(keyword))) {
*ok = false;
@@ -4845,7 +4850,7 @@ Handle<String> Parser::ParseIdentifier(bool* ok) {
(top_scope_->is_classic_mode() &&
(next == Token::FUTURE_STRICT_RESERVED_WORD ||
(next == Token::YIELD && !is_generator())))) {
- return GetSymbol(ok);
+ return GetSymbol();
} else {
ReportUnexpectedToken(next);
*ok = false;
@@ -4869,7 +4874,7 @@ Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
*ok = false;
return Handle<String>();
}
- return GetSymbol(ok);
+ return GetSymbol();
}
@@ -4883,7 +4888,7 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
*ok = false;
return Handle<String>();
}
- return GetSymbol(ok);
+ return GetSymbol();
}
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 1defbf2742..8a3ae92906 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -767,7 +767,7 @@ class Parser BASE_EMBEDDED {
}
}
- Handle<String> GetSymbol(bool* ok);
+ Handle<String> GetSymbol();
// Get odd-ball literals.
Literal* GetLiteralUndefined();
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 054d5b5a50..f76ec44332 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -115,26 +115,11 @@ void* OS::GetRandomMmapAddr() {
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
uint32_t raw_addr = V8::RandomPrivate(isolate);
-
- raw_addr &= 0x3ffff000;
-
-# ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-# else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
+ raw_addr &= 0x3ffff000;
raw_addr += 0x20000000;
-# endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 64dabf62b7..d6e8a3775b 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -55,7 +55,8 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
resource_name_(resource_name),
line_number_(line_number),
shared_id_(0),
- security_token_id_(security_token_id) {
+ security_token_id_(security_token_id),
+ no_frame_ranges_(NULL) {
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index b1b163b50e..d923bc04e8 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -29,6 +29,7 @@
#include "profile-generator-inl.h"
+#include "compiler.h"
#include "global-handles.h"
#include "scopeinfo.h"
#include "unicode.h"
@@ -186,6 +187,11 @@ size_t StringsStorage::GetUsedMemorySize() const {
const char* const CodeEntry::kEmptyNamePrefix = "";
+CodeEntry::~CodeEntry() {
+ delete no_frame_ranges_;
+}
+
+
void CodeEntry::CopyData(const CodeEntry& source) {
tag_ = source.tag_;
name_prefix_ = source.name_prefix_;
@@ -531,13 +537,17 @@ void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
}
-CodeEntry* CodeMap::FindEntry(Address addr) {
+CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
CodeTree::Locator locator;
if (tree_.FindGreatestLessThan(addr, &locator)) {
// locator.key() <= addr. Need to check that addr is within entry.
const CodeEntryInfo& entry = locator.value();
- if (addr < (locator.key() + entry.size))
+ if (addr < (locator.key() + entry.size)) {
+ if (start) {
+ *start = locator.key();
+ }
return entry.entry;
+ }
}
return NULL;
}
@@ -898,7 +908,26 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
CodeEntry** entry = entries.start();
memset(entry, 0, entries.length() * sizeof(*entry));
if (sample.pc != NULL) {
- *entry++ = code_map_.FindEntry(sample.pc);
+ Address start;
+ CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
+ // If pc is in the function code before it set up stack frame or after the
+ // frame was destroyed SafeStackTraceFrameIterator incorrectly thinks that
+ // ebp contains return address of the current function and skips caller's
+ // frame. Check for this case and just skip such samples.
+ if (pc_entry) {
+ List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
+ if (ranges) {
+ Code* code = Code::cast(HeapObject::FromAddress(start));
+ int pc_offset = static_cast<int>(sample.pc - code->instruction_start());
+ for (int i = 0; i < ranges->length(); i++) {
+ OffsetRange& range = ranges->at(i);
+ if (range.from <= pc_offset && pc_offset < range.to) {
+ return;
+ }
+ }
+ }
+ }
+ *entry++ = pc_entry;
if (sample.has_external_callback) {
// Don't use PC when in external callback code, as it can point
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 761291e121..37cc57d2d2 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -35,6 +35,8 @@
namespace v8 {
namespace internal {
+struct OffsetRange;
+
class TokenEnumerator {
public:
TokenEnumerator();
@@ -100,6 +102,7 @@ class CodeEntry {
const char* resource_name,
int line_number,
int security_token_id));
+ ~CodeEntry();
INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
INLINE(const char* name_prefix() const) { return name_prefix_; }
@@ -112,6 +115,11 @@ class CodeEntry {
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
+ List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
+ void set_no_frame_ranges(List<OffsetRange>* ranges) {
+ no_frame_ranges_ = ranges;
+ }
+
void CopyData(const CodeEntry& source);
uint32_t GetCallUid() const;
bool IsSameAs(CodeEntry* entry) const;
@@ -126,6 +134,7 @@ class CodeEntry {
int line_number_;
int shared_id_;
int security_token_id_;
+ List<OffsetRange>* no_frame_ranges_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@@ -251,7 +260,7 @@ class CodeMap {
CodeMap() : next_shared_id_(1) { }
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
- CodeEntry* FindEntry(Address addr);
+ CodeEntry* FindEntry(Address addr, Address* start = NULL);
int GetSharedId(Address addr);
void Print();
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 62140fe962..674fc8869a 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -83,6 +83,7 @@ class Representation {
kSmi,
kInteger32,
kDouble,
+ kHeapObject,
kTagged,
kExternal,
kNumRepresentations
@@ -95,6 +96,7 @@ class Representation {
static Representation Smi() { return Representation(kSmi); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
+ static Representation HeapObject() { return Representation(kHeapObject); }
static Representation External() { return Representation(kExternal); }
static Representation FromKind(Kind kind) { return Representation(kind); }
@@ -111,6 +113,7 @@ class Representation {
bool is_more_general_than(const Representation& other) const {
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
+ if (IsHeapObject()) return other.IsDouble();
return kind_ > other.kind_;
}
@@ -119,11 +122,9 @@ class Representation {
}
Representation generalize(Representation other) {
- if (is_more_general_than(other)) {
- return *this;
- } else {
- return other;
- }
+ if (other.fits_into(*this)) return *this;
+ if (other.is_more_general_than(*this)) return other;
+ return Representation::Tagged();
}
Kind kind() const { return static_cast<Kind>(kind_); }
@@ -132,6 +133,7 @@ class Representation {
bool IsSmi() const { return kind_ == kSmi; }
bool IsInteger32() const { return kind_ == kInteger32; }
bool IsDouble() const { return kind_ == kDouble; }
+ bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
return kind_ == kInteger32 || kind_ == kDouble;
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 95e86b119c..c4b79b11b5 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -122,10 +122,9 @@ static void GetICCounts(JSFunction* function,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
- if (FLAG_trace_opt) {
+ if (FLAG_trace_opt && function->PassesHydrogenFilter()) {
PrintF("[marking ");
- function->PrintName();
- PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
+ function->ShortPrint();
PrintF(" for recompilation, reason: %s", reason);
if (FLAG_type_info_threshold > 0) {
int typeinfo, total, percentage;
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 4e00b2941d..61b3549dde 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -676,7 +676,7 @@ bool Runtime::SetupArrayBuffer(Isolate* isolate,
array_buffer->set_backing_store(data);
Handle<Object> byte_length =
- isolate->factory()->NewNumber(static_cast<double>(allocated_length));
+ isolate->factory()->NewNumberFromSize(allocated_length);
CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
array_buffer->set_byte_length(*byte_length);
return true;
@@ -795,51 +795,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
CONVERT_ARG_HANDLE_CHECKED(Object, byte_length_object, 4);
ExternalArrayType arrayType;
- ElementsKind elementsKind;
size_t elementSize;
switch (arrayId) {
case ARRAY_ID_UINT8:
- elementsKind = EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
arrayType = kExternalUnsignedByteArray;
elementSize = 1;
break;
case ARRAY_ID_INT8:
- elementsKind = EXTERNAL_BYTE_ELEMENTS;
arrayType = kExternalByteArray;
elementSize = 1;
break;
case ARRAY_ID_UINT16:
- elementsKind = EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
arrayType = kExternalUnsignedShortArray;
elementSize = 2;
break;
case ARRAY_ID_INT16:
- elementsKind = EXTERNAL_SHORT_ELEMENTS;
arrayType = kExternalShortArray;
elementSize = 2;
break;
case ARRAY_ID_UINT32:
- elementsKind = EXTERNAL_UNSIGNED_INT_ELEMENTS;
arrayType = kExternalUnsignedIntArray;
elementSize = 4;
break;
case ARRAY_ID_INT32:
- elementsKind = EXTERNAL_INT_ELEMENTS;
arrayType = kExternalIntArray;
elementSize = 4;
break;
case ARRAY_ID_FLOAT32:
- elementsKind = EXTERNAL_FLOAT_ELEMENTS;
arrayType = kExternalFloatArray;
elementSize = 4;
break;
case ARRAY_ID_FLOAT64:
- elementsKind = EXTERNAL_DOUBLE_ELEMENTS;
arrayType = kExternalDoubleArray;
elementSize = 8;
break;
case ARRAY_ID_UINT8C:
- elementsKind = EXTERNAL_PIXEL_ELEMENTS;
arrayType = kExternalPixelArray;
elementSize = 1;
break;
@@ -857,16 +847,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
ASSERT(byte_length % elementSize == 0);
size_t length = byte_length / elementSize;
- Handle<Object> length_obj =
- isolate->factory()->NewNumber(static_cast<double>(length));
+ Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
+
Handle<ExternalArray> elements =
isolate->factory()->NewExternalArray(
static_cast<int>(length), arrayType,
static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- Handle<Map> map =
- isolate->factory()->GetElementsTransitionMap(holder, elementsKind);
- holder->set_map(*map);
holder->set_elements(*elements);
return isolate->heap()->undefined_value();
}
@@ -2498,6 +2485,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
return Failure::Exception();
}
+ // Mark both, the source and the target, as un-flushable because the
+ // shared unoptimized code makes them impossible to enqueue in a list.
+ ASSERT(target_shared->code()->gc_metadata() == NULL);
+ ASSERT(source_shared->code()->gc_metadata() == NULL);
+ target_shared->set_dont_flush(true);
+ source_shared->set_dont_flush(true);
+
// Set the code, scope info, formal parameter count, and the length
// of the target shared function info. Set the source code of the
// target function to undefined. SetCode is only used for built-in
@@ -7966,7 +7960,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
JavaScriptFrame* frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
- RUNTIME_ASSERT(type != Deoptimizer::EAGER || function->IsOptimized());
+ Handle<Code> optimized_code(function->code());
+ RUNTIME_ASSERT((type != Deoptimizer::EAGER &&
+ type != Deoptimizer::SOFT) || function->IsOptimized());
// Avoid doing too much work when running with --always-opt and keep
// the optimized code around.
@@ -8003,8 +7999,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
} else {
Deoptimizer::DeoptimizeFunction(*function);
}
- // Flush optimized code cache for this function.
- function->shared()->ClearOptimizedCodeMap();
+ // Evict optimized code for this function from the cache so that it doesn't
+ // get used for new closures.
+ function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
+ "notify deoptimized");
return isolate->heap()->undefined_value();
}
@@ -8993,7 +8991,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
NoHandleAllocation ha(isolate);
ASSERT(args.length() == 0);
- isolate->PrintStack();
+ isolate->PrintStack(stdout);
return isolate->heap()->undefined_value();
}
@@ -9216,26 +9214,6 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
- // This utility adjusts the property attributes for newly created Function
- // object ("new Function(...)") by changing the map.
- // All it does is changing the prototype property to enumerable
- // as specified in ECMA262, 15.3.5.2.
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
-
- Handle<Map> map = func->shared()->is_classic_mode()
- ? isolate->function_instance_map()
- : isolate->strict_mode_function_instance_map();
-
- ASSERT(func->map()->instance_type() == map->instance_type());
- ASSERT(func->map()->instance_size() == map->instance_size());
- func->set_map(*map);
- return *func;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
// Allocate a block of memory in NewSpace (filled with a filler).
// Use as fallback for allocation in generated code when NewSpace
@@ -13121,7 +13099,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
ASSERT(args.length() == 2);
OS::PrintError("abort: %s\n",
reinterpret_cast<char*>(args[0]) + args.smi_at(1));
- isolate->PrintStack();
+ isolate->PrintStack(stderr);
OS::Abort();
UNREACHABLE();
return NULL;
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index c91fee6ec8..a37c851e81 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -100,7 +100,6 @@ namespace internal {
F(GetOptimizationStatus, 1, 1) \
F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
- F(SetNewFunctionAttributes, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
F(AllocateInOldPointerSpace, 1, 1) \
F(SetNativeFlag, 1, 1) \
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index 0ba8f52d44..88b4afe115 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -187,6 +187,31 @@ class StringStream {
};
+// Utility class to print a list of items to a stream, divided by a separator.
+class SimpleListPrinter {
+ public:
+ explicit SimpleListPrinter(StringStream* stream, char separator = ',') {
+ separator_ = separator;
+ stream_ = stream;
+ first_ = true;
+ }
+
+ void Add(const char* str) {
+ if (first_) {
+ first_ = false;
+ } else {
+ stream_->Put(separator_);
+ }
+ stream_->Add(str);
+ }
+
+ private:
+ bool first_;
+ char separator_;
+ StringStream* stream_;
+};
+
+
} } // namespace v8::internal
#endif // V8_STRING_STREAM_H_
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 4cae85b022..44315bba16 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -170,7 +170,6 @@ function StringLocaleCompare(other) {
throw MakeTypeError("called_on_null_or_undefined",
["String.prototype.localeCompare"]);
}
- if (%_ArgumentsLength() === 0) return 0;
return %StringLocaleCompare(TO_STRING_INLINE(this),
TO_STRING_INLINE(other));
}
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index f43c9acee3..4e3a906be9 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -907,9 +907,8 @@ Handle<Code> StubCache::ComputeCallMiss(int argc,
Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
- NilValue nil,
- CompareNilICStub::Types types) {
- CompareNilICStub stub(kNonStrictEquality, nil, types);
+ CompareNilICStub& stub) {
+ stub.SetKind(kNonStrictEquality);
Handle<String> name(isolate_->heap()->empty_string());
if (!receiver_map->is_shared()) {
@@ -1035,10 +1034,12 @@ void StubCache::Clear() {
Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
for (int i = 0; i < kPrimaryTableSize; i++) {
primary_[i].key = heap()->empty_string();
+ primary_[i].map = NULL;
primary_[i].value = empty;
}
for (int j = 0; j < kSecondaryTableSize; j++) {
secondary_[j].key = heap()->empty_string();
+ secondary_[j].map = NULL;
secondary_[j].value = empty;
}
}
@@ -1105,13 +1106,13 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
Handle<String> str = Handle<String>::cast(name);
LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
- CustomArguments custom_args(isolate, callback->data(), recv, recv);
- v8::AccessorInfo info(custom_args.end());
+ PropertyCallbackArguments
+ custom_args(isolate, callback->data(), recv, recv);
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, setter_address);
- fun(v8::Utils::ToLocal(str), v8::Utils::ToLocal(value), info);
+ custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
@@ -1129,13 +1130,13 @@ static const int kAccessorInfoOffsetInInterceptorArgs = 2;
* provide any value for the given name.
*/
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
+ typedef PropertyCallbackArguments PCA;
+ static const int kArgsOffset = kAccessorInfoOffsetInInterceptorArgs;
Handle<Name> name_handle = args.at<Name>(0);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
- ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
- ASSERT(args[2]->IsJSObject()); // Receiver.
- ASSERT(args[3]->IsJSObject()); // Holder.
- ASSERT(args[5]->IsSmi()); // Isolate.
- ASSERT(args.length() == 6);
+ ASSERT(kArgsOffset == 2);
+ // No ReturnValue in interceptors.
+ ASSERT(args.length() == kArgsOffset + PCA::kArgsLength - 1);
// TODO(rossberg): Support symbols in the API.
if (name_handle->IsSymbol())
@@ -1147,16 +1148,22 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
ASSERT(getter != NULL);
+ Handle<JSObject> receiver =
+ args.at<JSObject>(kArgsOffset - PCA::kThisIndex);
+ Handle<JSObject> holder =
+ args.at<JSObject>(kArgsOffset - PCA::kHolderIndex);
+ PropertyCallbackArguments callback_args(isolate,
+ interceptor_info->data(),
+ *receiver,
+ *holder);
{
// Use the interceptor getter.
- v8::AccessorInfo info(args.arguments() -
- kAccessorInfoOffsetInInterceptorArgs);
HandleScope scope(isolate);
v8::Handle<v8::Value> r;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- r = getter(v8::Utils::ToLocal(name), info);
+ r = callback_args.Call(getter, v8::Utils::ToLocal(name));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
@@ -1190,12 +1197,17 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
static MaybeObject* LoadWithInterceptor(Arguments* args,
PropertyAttributes* attrs) {
+ typedef PropertyCallbackArguments PCA;
+ static const int kArgsOffset = kAccessorInfoOffsetInInterceptorArgs;
Handle<Name> name_handle = args->at<Name>(0);
Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
- ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
- Handle<JSObject> receiver_handle = args->at<JSObject>(2);
- Handle<JSObject> holder_handle = args->at<JSObject>(3);
- ASSERT(args->length() == 6);
+ ASSERT(kArgsOffset == 2);
+ // No ReturnValue in interceptors.
+ ASSERT(args->length() == kArgsOffset + PCA::kArgsLength - 1);
+ Handle<JSObject> receiver_handle =
+ args->at<JSObject>(kArgsOffset - PCA::kThisIndex);
+ Handle<JSObject> holder_handle =
+ args->at<JSObject>(kArgsOffset - PCA::kHolderIndex);
Isolate* isolate = receiver_handle->GetIsolate();
@@ -1210,16 +1222,18 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
ASSERT(getter != NULL);
+ PropertyCallbackArguments callback_args(isolate,
+ interceptor_info->data(),
+ *receiver_handle,
+ *holder_handle);
{
// Use the interceptor getter.
- v8::AccessorInfo info(args->arguments() -
- kAccessorInfoOffsetInInterceptorArgs);
HandleScope scope(isolate);
v8::Handle<v8::Value> r;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
- r = getter(v8::Utils::ToLocal(name), info);
+ r = callback_args.Call(getter, v8::Utils::ToLocal(name));
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index cbaeacee80..9365d96de0 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -281,8 +281,7 @@ class StubCache {
// ---
Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
- NilValue nil,
- CompareNilICStub::Types types);
+ CompareNilICStub& stub);
// ---
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 1757bee767..53866c16cb 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -105,6 +105,8 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
if (!preliminary_checks) return false;
Map* map = code->FindFirstMap();
+ if (map == NULL) return false;
+ map = map->CurrentMapForDeprecated();
return map != NULL && !CanRetainOtherContext(map, *native_context_);
}
return false;
@@ -136,6 +138,8 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
if (!preliminary_checks) return false;
Map* map = code->FindFirstMap();
+ if (map == NULL) return false;
+ map = map->CurrentMapForDeprecated();
return map != NULL && !CanRetainOtherContext(map, *native_context_);
}
return false;
@@ -192,11 +196,10 @@ Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
+ Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
+ return map == NULL || CanRetainOtherContext(map, *native_context_)
? Handle<Map>::null()
- : Handle<Map>(first_map);
+ : Handle<Map>(map);
}
return Handle<Map>::cast(map_or_code);
}
@@ -208,11 +211,10 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
+ Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
+ return map == NULL || CanRetainOtherContext(map, *native_context_)
? Handle<Map>::null()
- : Handle<Map>(first_map);
+ : Handle<Map>(map);
}
return Handle<Map>::cast(map_or_code);
}
@@ -222,8 +224,15 @@ Handle<Map> TypeFeedbackOracle::CompareNilMonomorphicReceiverType(
TypeFeedbackId id) {
Handle<Object> maybe_code = GetInfo(id);
if (maybe_code->IsCode()) {
- Map* first_map = Handle<Code>::cast(maybe_code)->FindFirstMap();
- if (first_map != NULL) return Handle<Map>(first_map);
+ Map* map = Handle<Code>::cast(maybe_code)->FindFirstMap();
+ if (map == NULL) return Handle<Map>();
+ map = map->CurrentMapForDeprecated();
+ return map == NULL || CanRetainOtherContext(map, *native_context_)
+ ? Handle<Map>()
+ : Handle<Map>(map);
+ } else if (maybe_code->IsMap()) {
+ ASSERT(!Handle<Map>::cast(maybe_code)->is_deprecated());
+ return Handle<Map>::cast(maybe_code);
}
return Handle<Map>();
}
@@ -426,11 +435,10 @@ Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
if (state != CompareIC::KNOWN_OBJECT) {
return Handle<Map>::null();
}
- Map* first_map = code->FindFirstMap();
- ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *native_context_)
+ Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
+ return map == NULL || CanRetainOtherContext(map, *native_context_)
? Handle<Map>::null()
- : Handle<Map>(first_map);
+ : Handle<Map>(map);
}
@@ -637,9 +645,9 @@ byte TypeFeedbackOracle::CompareNilTypes(TypeFeedbackId id) {
Handle<Object> object = GetInfo(id);
if (object->IsCode() &&
Handle<Code>::cast(object)->is_compare_nil_ic_stub()) {
- return Handle<Code>::cast(object)->compare_nil_state();
+ return Handle<Code>::cast(object)->compare_nil_types();
} else {
- return CompareNilICStub::kFullCompare;
+ return CompareNilICStub::Types::FullCompare().ToIntegral();
}
}
@@ -717,7 +725,8 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
SetInfo(ast_id, static_cast<Object*>(target));
} else if (!CanRetainOtherContext(Map::cast(map),
*native_context_)) {
- SetInfo(ast_id, map);
+ Map* feedback = Map::cast(map)->CurrentMapForDeprecated();
+ if (feedback != NULL) SetInfo(ast_id, feedback);
}
}
} else {
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index b2c2ff1098..93cded18bc 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -242,42 +242,52 @@ inline int StrLength(const char* string) {
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
-template<class T, int shift, int size>
-class BitField {
+
+template<class T, int shift, int size, class U>
+class BitFieldBase {
public:
- // A uint32_t mask of bit field. To use all bits of a uint32 in a
- // bitfield without compiler warnings we have to compute 2^32 without
- // using a shift count of 32.
- static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
- static const uint32_t kShift = shift;
- static const uint32_t kSize = size;
+ // A type U mask of bit field. To use all bits of a type U of x bits
+ // in a bitfield without compiler warnings we have to compute 2^x
+ // without using a shift count of x in the computation.
+ static const U kOne = static_cast<U>(1U);
+ static const U kMask = ((kOne << shift) << size) - (kOne << shift);
+ static const U kShift = shift;
+ static const U kSize = size;
// Value for the field with all bits set.
static const T kMax = static_cast<T>((1U << size) - 1);
// Tells whether the provided value fits into the bit field.
static bool is_valid(T value) {
- return (static_cast<uint32_t>(value) & ~static_cast<uint32_t>(kMax)) == 0;
+ return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
}
- // Returns a uint32_t with the bit field value encoded.
- static uint32_t encode(T value) {
+ // Returns a type U with the bit field value encoded.
+ static U encode(T value) {
ASSERT(is_valid(value));
- return static_cast<uint32_t>(value) << shift;
+ return static_cast<U>(value) << shift;
}
- // Returns a uint32_t with the bit field value updated.
- static uint32_t update(uint32_t previous, T value) {
+ // Returns a type U with the bit field value updated.
+ static U update(U previous, T value) {
return (previous & ~kMask) | encode(value);
}
// Extracts the bit field from the value.
- static T decode(uint32_t value) {
+ static T decode(U value) {
return static_cast<T>((value & kMask) >> shift);
}
};
+template<class T, int shift, int size>
+class BitField : public BitFieldBase<T, shift, size, uint32_t> { };
+
+
+template<class T, int shift, int size>
+class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
+
+
// ----------------------------------------------------------------------------
// Hash function.
@@ -1030,6 +1040,7 @@ class EnumSet {
void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
T ToIntegral() const { return bits_; }
bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
+ bool operator!=(const EnumSet& set) { return bits_ != set.bits_; }
EnumSet<E, T> operator|(const EnumSet& set) const {
return EnumSet<E, T>(bits_ | set.bits_);
}
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 274128ed4c..e21c815ff5 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -281,6 +281,7 @@ void V8::InitializeOncePerProcessImpl() {
LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
ExternalReference::SetUp();
+ Bootstrapper::InitializeOncePerProcess();
}
void V8::InitializeOncePerProcess() {
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 9266af6f0c..b2ea749c73 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -1356,6 +1356,7 @@ function ObjectConstructor(x) {
function SetUpObject() {
%CheckIsBootstrapping();
+ %SetNativeFlag($Object);
%SetCode($Object, ObjectConstructor);
%FunctionSetName(ObjectPoisonProto, "__proto__");
%FunctionRemovePrototype(ObjectPoisonProto);
@@ -1766,7 +1767,9 @@ function NewFunction(arg1) { // length == 1
// If the formal parameters string include ) - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
- if (p.indexOf(')') != -1) throw MakeSyntaxError('paren_in_arg_string',[]);
+ if (%_CallFunction(p, ')', StringIndexOf) != -1) {
+ throw MakeSyntaxError('paren_in_arg_string',[]);
+ }
// If the formal parameters include an unbalanced block comment, the
// function must be rejected. Since JavaScript does not allow nested
// comments we can include a trailing block comment to catch this.
@@ -1775,13 +1778,11 @@ function NewFunction(arg1) { // length == 1
var body = (n > 0) ? ToString(%_Arguments(n - 1)) : '';
var source = '(function(' + p + ') {\n' + body + '\n})';
- // The call to SetNewFunctionAttributes will ensure the prototype
- // property of the resulting function is enumerable (ECMA262, 15.3.5.2).
var global_receiver = %GlobalReceiver(global);
var f = %_CallFunction(global_receiver, %CompileString(source, true));
%FunctionMarkNameShouldPrintAsAnonymous(f);
- return %SetNewFunctionAttributes(f);
+ return f;
}
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index baafcf7f3b..cec040ae46 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 19
-#define BUILD_NUMBER 0
+#define BUILD_NUMBER 3
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index e7daa7f9d6..b1b99b473b 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -729,6 +729,11 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index f7ded184ec..f87d952568 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -287,8 +287,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// The optimized code map must never be empty, so check the first elements.
Label install_optimized;
// Speculatively move code object into edx.
- __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
- __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
+ __ movq(rdx, FieldOperand(rbx, SharedFunctionInfo::kFirstCodeSlot));
+ __ cmpq(rcx, FieldOperand(rbx, SharedFunctionInfo::kFirstContextSlot));
__ j(equal, &install_optimized);
// Iterate through the rest of map backwards. rdx holds an index.
@@ -298,9 +298,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ SmiToInteger32(rdx, rdx);
__ bind(&loop);
// Do not double check first entry.
- __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
+ __ cmpq(rdx, Immediate(SharedFunctionInfo::kSecondEntryIndex));
__ j(equal, &restore);
- __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry.
+ __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
__ cmpq(rcx, FieldOperand(rbx,
rdx,
times_pointer_size,
@@ -1272,6 +1272,17 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
}
+static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
+ MacroAssembler* masm) {
+ // Push arguments, but ensure they are under the return address
+ // for a tail call.
+ __ pop(rcx);
+ __ push(rdx);
+ __ push(rax);
+ __ push(rcx);
+}
+
+
void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
@@ -1284,8 +1295,9 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
+ StringAddStub string_add_left_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_left_stub);
// Left operand is not a string, test right.
@@ -1294,8 +1306,9 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &call_runtime, Label::kNear);
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
+ StringAddStub string_add_right_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_right_stub);
// Neither argument is a string.
@@ -1322,8 +1335,12 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (call_runtime.is_linked()) {
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
}
@@ -1356,8 +1373,9 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &call_runtime);
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
+ StringAddStub string_add_stub((StringAddFlags)
+ (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_stub);
__ bind(&call_runtime);
@@ -1442,8 +1460,12 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&gc_required);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -1462,8 +1484,12 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
}
@@ -1507,10 +1533,8 @@ static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(rcx);
__ push(rdx);
__ push(rax);
- __ push(rcx);
}
@@ -4791,7 +4815,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
+ if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
__ JumpIfSmi(rax, &call_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
__ j(above_equal, &call_runtime);
@@ -5068,15 +5092,53 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if ((flags_ & ERECT_FRAME) != 0) {
+ GenerateRegisterArgsPop(masm, rcx);
+ // Build a frame
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ __ CallRuntime(Runtime::kStringAdd, 2);
+ }
+ __ Ret();
+ } else {
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ }
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ if ((flags_ & ERECT_FRAME) != 0) {
+ GenerateRegisterArgsPop(masm, rcx);
+ // Build a frame
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
+ }
+ __ Ret();
+ } else {
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
}
}
+void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(rax);
+ __ push(rdx);
+}
+
+
+void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
+ Register temp) {
+ __ pop(temp);
+ __ pop(rdx);
+ __ pop(rax);
+ __ push(temp);
+}
+
+
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index eafb960255..f6cfad048e 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -207,11 +207,13 @@ class StringHelper : public AllStatic {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_ADD_FLAGS = 1 << 0,
// Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
// Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
+ // Stub needs a frame before calling the runtime
+ ERECT_FRAME = 1 << 3,
// Omit both string checks in stub.
NO_STRING_CHECK_IN_STUB =
NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
@@ -236,6 +238,9 @@ class StringAddStub: public PlatformCodeStub {
Register scratch3,
Label* slow);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
+
const StringAddFlags flags_;
};
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index bec158b5b2..a579f52e7b 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -55,13 +55,12 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
// Get the optimized code.
Code* code = function->code();
+ // The optimized code is going to be patched, so we cannot use it any more.
+ function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
+
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
@@ -268,7 +267,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
@@ -363,183 +362,12 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
}
}
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- intptr_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(rsi.code(), value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Code* continuation = (bailout_type_ == EAGER)
- ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
- : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -581,6 +409,12 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on x64 in the input frame.
+ return false;
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -619,7 +453,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the address of the location in the code object if possible
// and compute the fp-to-sp delta in register arg5.
- if (type() == EAGER) {
+ if (type() == EAGER || type() == SOFT) {
__ Set(arg_reg_4, 0);
__ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
} else {
@@ -670,7 +504,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Remove the bailout id from the stack.
- if (type() == EAGER) {
+ if (type() == EAGER || type() == SOFT) {
__ addq(rsp, Immediate(kPointerSize));
} else {
__ addq(rsp, Immediate(2 * kPointerSize));
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 1d6adfdc21..a811a34ba9 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -43,6 +43,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) {
}
+Register JavaScriptFrame::fp_register() { return rbp; }
+Register JavaScriptFrame::context_register() { return rsi; }
+
+
Register StubFailureTrampolineFrame::fp_register() { return rbp; }
Register StubFailureTrampolineFrame::context_register() { return rsi; }
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 19fa0aaddd..22c6fae187 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -157,6 +157,7 @@ void FullCodeGenerator::Generate() {
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
__ push(rdi); // Callee's JS Function.
+ info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -406,6 +407,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// patch with the code required by the debugger.
__ movq(rsp, rbp);
__ pop(rbp);
+ int no_frame_start = masm_->pc_offset();
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, rcx);
@@ -423,6 +425,7 @@ void FullCodeGenerator::EmitReturnSequence() {
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -1946,8 +1949,96 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
- case Yield::DELEGATING:
- UNIMPLEMENTED();
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ jmp(&l_send);
+
+ // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize)); // iter
+ __ push(rcx); // iter
+ __ push(rax); // exception
+ __ movq(rax, rcx); // iter
+ __ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw"
+ Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(throw_ic); // iter.throw in rax
+ __ jmp(&l_call);
+
+ // try { received = yield result.value }
+ __ bind(&l_try);
+ __ pop(rax); // result.value
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(rax); // result.value
+ __ push(Operand(rsp, (0 + 1) * kPointerSize + handler_size)); // g
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ movq(context_register(),
+ Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &l_resume);
+ EmitReturnIteratorResult(false);
+ __ bind(&l_resume); // received in rax
+ __ PopTryHandler();
+
+ // receiver = iter; f = iter.send; arg = received;
+ __ bind(&l_send);
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize)); // iter
+ __ push(rcx); // iter
+ __ push(rax); // received
+ __ movq(rax, rcx); // iter
+ __ LoadRoot(rcx, Heap::ksend_stringRootIndex); // "send"
+ Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(send_ic); // iter.send in rax
+
+ // result = f.call(receiver, arg);
+ __ bind(&l_call);
+ Label l_call_runtime;
+ __ JumpIfSmi(rax, &l_call_runtime);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &l_call_runtime);
+ __ movq(rdi, rax);
+ ParameterCount count(1);
+ __ InvokeFunction(rdi, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(&l_loop);
+ __ bind(&l_call_runtime);
+ __ push(rax);
+ __ CallRuntime(Runtime::kCall, 3);
+
+ // val = result.value; if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ // result.value
+ __ push(rax); // save result
+ __ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
+ Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(value_ic); // result.value in rax
+ __ pop(rbx); // result
+ __ push(rax); // result.value
+ __ movq(rax, rbx); // result
+ __ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
+ Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
+ CallIC(done_ic); // result.done in rax
+ ToBooleanStub stub(rax);
+ __ push(rax);
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ j(zero, &l_try);
+
+ // result.value
+ __ pop(rax); // result.value
+ context()->DropAndPlug(2, rax); // drop iter and g
+ break;
+ }
}
}
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 761e05ae46..6425f89416 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1543,7 +1543,7 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
__ push(rbx); // return address
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index f908ea1ffc..9a1ce98009 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -100,6 +100,12 @@ void LCodeGen::FinishCode(Handle<Code> code) {
transition_maps_.at(i)->AddDependentCode(
DependentCode::kTransitionGroup, code);
}
+ if (graph()->depends_on_empty_array_proto_elements()) {
+ isolate()->initial_object_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ isolate()->initial_array_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ }
}
@@ -168,6 +174,7 @@ bool LCodeGen::GeneratePrologue() {
} else {
__ push(rdi); // Callee's JS function.
}
+ info()->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -295,9 +302,7 @@ bool LCodeGen::GenerateJumpTable() {
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
Address entry = jump_table_[i].address;
- bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
Comment(";;; jump table entry %d.", i);
@@ -306,7 +311,7 @@ bool LCodeGen::GenerateJumpTable() {
}
if (jump_table_[i].needs_frame) {
__ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
- if (is_lazy_deopt) {
+ if (type == Deoptimizer::LAZY) {
if (needs_frame_is_call.is_bound()) {
__ jmp(&needs_frame_is_call);
} else {
@@ -342,7 +347,7 @@ bool LCodeGen::GenerateJumpTable() {
}
}
} else {
- if (is_lazy_deopt) {
+ if (type == Deoptimizer::LAZY) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
@@ -713,14 +718,13 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -753,8 +757,10 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
if (jump_table_.is_empty() ||
jump_table_.last().address != entry ||
jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ jump_table_.last().bailout_type != bailout_type) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
@@ -766,6 +772,21 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
}
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(cc, environment, bailout_type);
+}
+
+
+void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
+ ASSERT(!info()->IsStub());
+ DeoptimizeIf(no_condition, environment, Deoptimizer::SOFT);
+}
+
+
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1842,6 +1863,11 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
}
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ int3();
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2064,46 +2090,6 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ CompareRoot(reg, other_nil_value);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
- __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Label* is_not_object,
Label* is_object) {
@@ -2541,9 +2527,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
count++;
}
}
+ int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ movq(rsp, rbp);
__ pop(rbp);
+ no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
__ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
@@ -2558,6 +2546,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ addq(rsp, reg);
__ jmp(return_addr_reg);
}
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
@@ -3921,6 +3912,19 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
__ Integer32ToSmi(value, value);
}
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (IsInteger32Constant(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ Condition cc = masm()->CheckSmi(value);
+ DeoptimizeIf(cc, instr->environment());
+ }
+ }
} else if (FLAG_track_double_fields && representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(instr->is_in_object());
@@ -5152,106 +5156,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals = instr->hydrogen()->literals();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rbx, Immediate(Map::kElementsKindMask));
- __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
- Map::kElementsKindShift));
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call and pick the right
- // runtime function or stub to call. Boilerplate already exists,
- // constant elements are never accessed, pass an empty fixed array.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- __ LoadHeapObject(rax, literals);
- __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Move(rcx, isolate()->factory()->empty_fixed_array());
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(isolate()->factory()->empty_fixed_array());
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(isolate()->factory()->empty_fixed_array());
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- __ LoadHeapObject(rax, literals);
- __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Move(rcx, isolate()->factory()->empty_fixed_array());
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals = instr->hydrogen()->literals();
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
-
- // Set up the parameters to the stub/runtime call and pick the right
- // runtime function or stub to call.
- int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
- instr->hydrogen()->depth() > 1) {
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- __ LoadHeapObject(rax, literals);
- __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Move(rcx, constant_properties);
- __ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ push(rax);
@@ -5495,7 +5399,11 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
+ if (instr->hydrogen_value()->IsSoftDeoptimize()) {
+ SoftDeoptimize(instr->environment());
+ } else {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index aa0ab9c0dd..d0dd90eeb6 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -247,8 +247,11 @@ class LCodeGen BASE_EMBEDDED {
int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
+ void SoftDeoptimize(LEnvironment* environment);
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
@@ -340,18 +343,6 @@ class LCodeGen BASE_EMBEDDED {
int* offset,
AllocationSiteMode mode);
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
void EnsureSpaceForLazyDeopt(int space_needed);
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
@@ -369,7 +360,7 @@ class LCodeGen BASE_EMBEDDED {
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> jump_table_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
ZoneList<Handle<Map> > transition_maps_;
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index f49f7d67f9..1217a4000d 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -219,15 +219,6 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
value()->PrintTo(stream);
@@ -976,6 +967,11 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
if (value->EmitAtUses()) {
@@ -1451,7 +1447,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
}
return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
+ instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kCanOverflow))
? AssignEnvironment(result)
: result;
} else if (instr->representation().IsTagged()) {
@@ -1640,13 +1637,6 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
@@ -2272,7 +2262,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
needs_write_barrier_for_map) ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject())) {
return AssignEnvironment(result);
}
return result;
@@ -2335,16 +2327,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index d1f7e76084..747d8e73d4 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -56,7 +56,6 @@ class LCodeGen;
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
- V(ArrayLiteral) \
V(BitI) \
V(BitNotI) \
V(BoundsCheck) \
@@ -90,6 +89,7 @@ class LCodeGen;
V(ConstantI) \
V(ConstantT) \
V(Context) \
+ V(DebugBreak) \
V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
@@ -115,7 +115,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
@@ -150,7 +149,6 @@ class LCodeGen;
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
- V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -824,26 +822,6 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
@@ -1195,6 +1173,12 @@ class LBranch: public LControlInstruction<1, 0> {
};
+class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
@@ -2372,20 +2356,6 @@ class LAllocate: public LTemplateInstruction<1, 1, 1> {
};
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 691894c0e2..31796b1283 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -677,8 +677,13 @@ static int Offset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
+void MacroAssembler::PrepareCallApiFunction(int arg_stack_space,
+ bool returns_handle) {
#if defined(_WIN64) && !defined(__MINGW64__)
+ if (!returns_handle) {
+ EnterApiExitFrame(arg_stack_space);
+ return;
+ }
// We need to prepare a slot for result handle on stack and put
// a pointer to it into 1st arg register.
EnterApiExitFrame(arg_stack_space + 1);
@@ -692,8 +697,9 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
- Label empty_result;
+ int stack_space,
+ bool returns_handle,
+ int return_value_offset) {
Label prologue;
Label promote_scheduled_exception;
Label delete_allocated_handles;
@@ -745,15 +751,25 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
PopSafepointRegisters();
}
+ // Can skip the result check for new-style callbacks
+ // TODO(dcarney): may need to pass this information down
+ // as some function_addresses might not have been registered
+ if (returns_handle) {
+ Label empty_result;
#if defined(_WIN64) && !defined(__MINGW64__)
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
+ // rax keeps a pointer to v8::Handle, unpack it.
+ movq(rax, Operand(rax, 0));
#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
+ // Check if the result handle holds 0.
+ testq(rax, rax);
+ j(zero, &empty_result);
+ // It was non-zero. Dereference to get the result value.
+ movq(rax, Operand(rax, 0));
+ jmp(&prologue);
+ bind(&empty_result);
+ }
+ // Load the value from ReturnValue
+ movq(rax, Operand(rbp, return_value_offset * kPointerSize));
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
@@ -807,11 +823,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
- bind(&empty_result);
- // It was zero; the result is undefined.
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- jmp(&prologue);
-
bind(&promote_scheduled_exception);
TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 76941ffbaf..6c8d5ff730 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -1224,13 +1224,16 @@ class MacroAssembler: public Assembler {
// rcx (rcx must be preserverd until CallApiFunctionAndReturn). Saves
// context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
// inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space);
+ void PrepareCallApiFunction(int arg_stack_space, bool returns_handle);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers r14, r15, rbx and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address, int stack_space);
+ void CallApiFunctionAndReturn(Address function_address,
+ int stack_space,
+ bool returns_handle,
+ int return_value_offset_from_rbp);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index a7faf9b663..148f65ee0d 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -398,7 +398,7 @@ static void CompileCallLoadPropertyWithInterceptor(
// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
+static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to API function in the
@@ -449,10 +449,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// (first fast api call extra argument)
// -- rsp[24] : api call data
// -- rsp[32] : isolate
- // -- rsp[40] : last argument
+ // -- rsp[40] : ReturnValue
+ //
+ // -- rsp[48] : last argument
// -- ...
- // -- rsp[(argc + 4) * 8] : first argument
- // -- rsp[(argc + 5) * 8] : receiver
+ // -- rsp[(argc + 5) * 8] : first argument
+ // -- rsp[(argc + 6) * 8] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -473,15 +475,23 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ movq(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
__ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(Operand(rsp, 5 * kPointerSize), kScratchRegister);
// Prepare arguments.
- __ lea(rbx, Operand(rsp, 4 * kPointerSize));
+ STATIC_ASSERT(kFastApiCallArguments == 5);
+ __ lea(rbx, Operand(rsp, kFastApiCallArguments * kPointerSize));
+
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
#if defined(__MINGW64__)
Register arguments_arg = rcx;
#elif defined(_WIN64)
// Win64 uses first register--rcx--for returned value.
- Register arguments_arg = rdx;
+ Register arguments_arg = returns_handle ? rdx : rcx;
#else
Register arguments_arg = rdi;
#endif
@@ -490,7 +500,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiStackSpace);
+ __ PrepareCallApiFunction(kApiStackSpace, returns_handle);
__ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
__ addq(rbx, Immediate(argc * kPointerSize));
@@ -502,10 +512,10 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ CallApiFunctionAndReturn(function_address,
- argc + kFastApiCallArguments + 1);
+ argc + kFastApiCallArguments + 1,
+ returns_handle,
+ kFastApiCallArguments + 1);
}
@@ -807,6 +817,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow);
@@ -953,6 +965,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
@@ -1284,18 +1298,24 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
- __ PushAddress(ExternalReference::isolate_address(isolate())); // isolate
+ __ PushAddress(ExternalReference::isolate_address(isolate()));
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ push(kScratchRegister); // return value
__ push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ bool returns_handle =
+ !CallbackTable::ReturnsVoid(isolate(), getter_address);
+
#if defined(__MINGW64__)
Register accessor_info_arg = rdx;
Register name_arg = rcx;
#elif defined(_WIN64)
// Win64 uses first register--rcx--for returned value.
- Register accessor_info_arg = r8;
- Register name_arg = rdx;
+ Register accessor_info_arg = returns_handle ? r8 : rdx;
+ Register name_arg = returns_handle ? rdx : rcx;
#else
Register accessor_info_arg = rsi;
Register name_arg = rdi;
@@ -1305,14 +1325,15 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ movq(name_arg, rsp);
__ push(scratch2()); // Restore return address.
- // 4 elements array for v8::Arguments::values_ and handler for name.
- const int kStackSpace = 5;
+ // v8::Arguments::values_ and handler for name.
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
// Allocate v8::AccessorInfo in non-GCed stack space.
const int kArgStackSpace = 1;
- __ PrepareCallApiFunction(kArgStackSpace);
- __ lea(rax, Operand(name_arg, 4 * kPointerSize));
+ __ PrepareCallApiFunction(kArgStackSpace, returns_handle);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 5);
+ __ lea(rax, Operand(name_arg, 5 * kPointerSize));
// v8::AccessorInfo::args_.
__ movq(StackSpaceOperand(0), rax);
@@ -1321,8 +1342,10 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ CallApiFunctionAndReturn(getter_address, kStackSpace);
+ __ CallApiFunctionAndReturn(getter_address,
+ kStackSpace,
+ returns_handle,
+ 3);
}
@@ -2268,7 +2291,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
name, depth, &miss);
// Move the return address on top of the stack.
- __ movq(rax, Operand(rsp, 4 * kPointerSize));
+ __ movq(rax, Operand(rsp, kFastApiCallArguments * kPointerSize));
__ movq(Operand(rsp, 0 * kPointerSize), rax);
GenerateFastApiCall(masm(), optimization, argc);
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 1cdaca440c..5507ac6f31 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -98,6 +98,8 @@ v8::Isolate* CcTest::default_isolate_;
int main(int argc, char* argv[]) {
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::internal::FLAG_harmony_array_buffer = true;
+ v8::internal::FLAG_harmony_typed_arrays = true;
CcTest::set_default_isolate(v8::Isolate::GetCurrent());
CHECK(CcTest::default_isolate() != NULL);
int tests_run = 0;
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index ecffeaa6f7..0a91ed5e70 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -53,6 +53,7 @@
'test-bignum.cc',
'test-bignum-dtoa.cc',
'test-circular-queue.cc',
+ 'test-compare-nil-ic-stub.cc',
'test-compiler.cc',
'test-conversions.cc',
'test-cpu-profiler.cc',
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index e5523b282f..d1925dc257 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -39,8 +39,8 @@ test-api/ApplyInterruption: PASS || TIMEOUT
# when snapshot is on, so I am marking it PASS || FAIL
test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
-# BUG(2628): This test is flaky and sometimes fails, but should not crash.
-test-cpu-profiler/CollectCpuProfile: PASS || FAIL
+# BUG(2628): These tests are flaky and sometimes fail, but should not crash.
+test-cpu-profiler/SampleWhenFrameIsNotSetup: PASS || FAIL
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
@@ -81,6 +81,15 @@ test-serialize/DeserializeAndRunScript2: SKIP
test-serialize/DeserializeFromSecondSerialization: SKIP
##############################################################################
+[ $arch == arm || $arch == mipsel ]
+
+# BUG(2628): Signal may come when pc is close to frame enter/exit code and on
+# simulator the stack frame is not set up when it is expected to be for the pc
+# value.
+test-cpu-profiler/CollectCpuProfile: PASS || FAIL
+test-cpu-profiler/SampleWhenFrameIsNotSetup: PASS || FAIL
+
+##############################################################################
[ $arch == android_arm || $arch == android_ia32 ]
# Tests crash as there is no /tmp directory in Android.
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index c9685f8f4c..c8f67de0ab 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -80,8 +80,8 @@ using ::v8::Value;
static void ExpectString(const char* code, const char* expected) {
Local<Value> result = CompileRun(code);
CHECK(result->IsString());
- String::AsciiValue ascii(result);
- CHECK_EQ(expected, *ascii);
+ String::Utf8Value utf8(result);
+ CHECK_EQ(expected, *utf8);
}
static void ExpectInt32(const char* code, int expected) {
@@ -805,62 +805,299 @@ THREADED_TEST(GlobalProperties) {
}
+template<typename T>
+static void CheckReturnValue(const T& t) {
+ v8::ReturnValue<v8::Value> rv = t.GetReturnValue();
+ i::Object** o = *reinterpret_cast<i::Object***>(&rv);
+ CHECK_EQ(t.GetIsolate(), v8::Isolate::GetCurrent());
+ CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
+}
+
static v8::Handle<Value> handle_call(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
+ CheckReturnValue(args);
+ args.GetReturnValue().Set(v8_str("bad value"));
return v8_num(102);
}
+static v8::Handle<Value> handle_call_indirect(const v8::Arguments& args) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(args);
+ args.GetReturnValue().Set(v8_str("bad value"));
+ args.GetReturnValue().Set(v8_num(102));
+ return v8::Handle<Value>();
+}
+
+static void handle_callback(const v8::FunctionCallbackInfo<Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(v8_str("bad value"));
+ info.GetReturnValue().Set(v8_num(102));
+}
+
static v8::Handle<Value> construct_call(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
+ CheckReturnValue(args);
args.This()->Set(v8_str("x"), v8_num(1));
args.This()->Set(v8_str("y"), v8_num(2));
+ args.GetReturnValue().Set(v8_str("bad value"));
return args.This();
}
-static v8::Handle<Value> Return239(Local<String> name, const AccessorInfo&) {
+static v8::Handle<Value> construct_call_indirect(const v8::Arguments& args) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(args);
+ args.This()->Set(v8_str("x"), v8_num(1));
+ args.This()->Set(v8_str("y"), v8_num(2));
+ args.GetReturnValue().Set(v8_str("bad value"));
+ args.GetReturnValue().Set(args.This());
+ return v8::Handle<Value>();
+}
+
+static void construct_callback(
+ const v8::FunctionCallbackInfo<Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info);
+ info.This()->Set(v8_str("x"), v8_num(1));
+ info.This()->Set(v8_str("y"), v8_num(2));
+ info.GetReturnValue().Set(v8_str("bad value"));
+ info.GetReturnValue().Set(info.This());
+}
+
+
+static v8::Handle<Value> Return239(
+ Local<String> name, const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(v8_str("bad value"));
return v8_num(239);
}
+static v8::Handle<Value> Return239Indirect(
+ Local<String> name, const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info);
+ Handle<Value> value = v8_num(239);
+ info.GetReturnValue().Set(v8_str("bad value"));
+ info.GetReturnValue().Set(value);
+ return v8::Handle<Value>();
+}
-THREADED_TEST(FunctionTemplate) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+static void Return239Callback(
+ Local<String> name, const v8::PropertyCallbackInfo<Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(v8_str("bad value"));
+ info.GetReturnValue().Set(v8_num(239));
+}
+
+
+template<typename Handler>
+static void TestFunctionTemplateInitializer(Handler handler) {
+ // Test constructor calls.
{
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(handle_call);
+ v8::FunctionTemplate::New(handler);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
Local<Script> script = v8_compile("obj()");
- CHECK_EQ(102, script->Run()->Int32Value());
+ for (int i = 0; i < 30; i++) {
+ CHECK_EQ(102, script->Run()->Int32Value());
+ }
}
// Use SetCallHandler to initialize a function template, should work like the
// previous one.
{
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- fun_templ->SetCallHandler(handle_call);
+ fun_templ->SetCallHandler(handler);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
Local<Script> script = v8_compile("obj()");
- CHECK_EQ(102, script->Run()->Int32Value());
+ for (int i = 0; i < 30; i++) {
+ CHECK_EQ(102, script->Run()->Int32Value());
+ }
}
- // Test constructor calls.
- {
- Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(construct_call);
- fun_templ->SetClassName(v8_str("funky"));
- fun_templ->InstanceTemplate()->SetAccessor(v8_str("m"), Return239);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("obj"), fun);
- Local<Script> script = v8_compile("var s = new obj(); s.x");
+}
+
+
+template<typename Constructor, typename Accessor>
+static void TestFunctionTemplateAccessor(Constructor constructor,
+ Accessor accessor) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(constructor);
+ fun_templ->SetClassName(v8_str("funky"));
+ fun_templ->InstanceTemplate()->SetAccessor(v8_str("m"), accessor);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("obj"), fun);
+ Local<Value> result = v8_compile("(new obj()).toString()")->Run();
+ CHECK_EQ(v8_str("[object funky]"), result);
+ CompileRun("var obj_instance = new obj();");
+ Local<Script> script;
+ script = v8_compile("obj_instance.x");
+ for (int i = 0; i < 30; i++) {
CHECK_EQ(1, script->Run()->Int32Value());
+ }
+ script = v8_compile("obj_instance.m");
+ for (int i = 0; i < 30; i++) {
+ CHECK_EQ(239, script->Run()->Int32Value());
+ }
+}
+
- Local<Value> result = v8_compile("(new obj()).toString()")->Run();
- CHECK_EQ(v8_str("[object funky]"), result);
+THREADED_TEST(FunctionTemplate) {
+ TestFunctionTemplateInitializer(handle_call);
+ TestFunctionTemplateInitializer(handle_call_indirect);
+ TestFunctionTemplateInitializer(handle_callback);
- result = v8_compile("(new obj()).m")->Run();
- CHECK_EQ(239, result->Int32Value());
+ TestFunctionTemplateAccessor(construct_call, Return239);
+ TestFunctionTemplateAccessor(construct_call_indirect, Return239Indirect);
+ TestFunctionTemplateAccessor(construct_callback, Return239Callback);
+}
+
+
+static v8::Handle<v8::Value> SimpleDirectCallback(const v8::Arguments& args) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(args);
+ args.GetReturnValue().Set(v8_str("bad value"));
+ return v8_num(51423 + args.Length());
+}
+
+static v8::Handle<v8::Value> SimpleIndirectCallback(const v8::Arguments& args) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(args);
+ args.GetReturnValue().Set(v8_num(51423 + args.Length()));
+ return v8::Handle<v8::Value>();
+}
+
+static void SimpleCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(v8_num(51423 + info.Length()));
+}
+
+
+template<typename Callback>
+static void TestSimpleCallback(Callback callback) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New();
+ object_template->Set("callback", v8::FunctionTemplate::New(callback));
+ v8::Local<v8::Object> object = object_template->NewInstance();
+ (*env)->Global()->Set(v8_str("callback_object"), object);
+ v8::Handle<v8::Script> script;
+ script = v8_compile("callback_object.callback(17)");
+ for (int i = 0; i < 30; i++) {
+ CHECK_EQ(51424, script->Run()->Int32Value());
+ }
+ script = v8_compile("callback_object.callback(17, 24)");
+ for (int i = 0; i < 30; i++) {
+ CHECK_EQ(51425, script->Run()->Int32Value());
+ }
+}
+
+
+THREADED_TEST(SimpleCallback) {
+ TestSimpleCallback(SimpleDirectCallback);
+ TestSimpleCallback(SimpleIndirectCallback);
+ TestSimpleCallback(SimpleCallback);
+}
+
+
+template<typename T>
+void FastReturnValueCallback(const v8::FunctionCallbackInfo<v8::Value>& info);
+
+// constant return values
+static const int32_t kFastReturnValueInt32 = 471;
+static const uint32_t kFastReturnValueUint32 = 571;
+static const double kFastReturnValueDouble = 2.7;
+// variable return values
+static bool fast_return_value_bool = false;
+static bool fast_return_value_void_is_null = false;
+
+template<>
+void FastReturnValueCallback<int32_t>(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(info.GetIsolate(), kFastReturnValueInt32);
+}
+
+template<>
+void FastReturnValueCallback<uint32_t>(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(info.GetIsolate(), kFastReturnValueUint32);
+}
+
+template<>
+void FastReturnValueCallback<double>(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(info.GetIsolate(), kFastReturnValueDouble);
+}
+
+template<>
+void FastReturnValueCallback<bool>(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(info.GetIsolate(), fast_return_value_bool);
+}
+
+template<>
+void FastReturnValueCallback<void>(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (fast_return_value_void_is_null) {
+ info.GetReturnValue().SetNull(info.GetIsolate());
+ } else {
+ info.GetReturnValue().SetUndefined(info.GetIsolate());
+ }
+}
+
+template<typename T>
+Handle<Value> TestFastReturnValues() {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New();
+ v8::FunctionCallback callback = &FastReturnValueCallback<T>;
+ object_template->Set("callback", v8::FunctionTemplate::New(callback));
+ v8::Local<v8::Object> object = object_template->NewInstance();
+ (*env)->Global()->Set(v8_str("callback_object"), object);
+ return scope.Close(CompileRun("callback_object.callback()"));
+}
+
+THREADED_TEST(FastReturnValues) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Handle<v8::Value> value;
+ // check int_32
+ value = TestFastReturnValues<int32_t>();
+ CHECK(value->IsInt32());
+ CHECK_EQ(kFastReturnValueInt32, value->Int32Value());
+ // check uint32_t
+ value = TestFastReturnValues<uint32_t>();
+ CHECK(value->IsInt32());
+ CHECK_EQ(kFastReturnValueUint32, value->Int32Value());
+ // check double
+ value = TestFastReturnValues<double>();
+ CHECK(value->IsNumber());
+ CHECK_EQ(kFastReturnValueDouble, value->ToNumber()->Value());
+ // check bool values
+ for (int i = 0; i < 2; i++) {
+ fast_return_value_bool = i == 0;
+ value = TestFastReturnValues<bool>();
+ CHECK(value->IsBoolean());
+ CHECK_EQ(fast_return_value_bool, value->ToBoolean()->Value());
+ }
+ // check oddballs
+ for (int i = 0; i < 2; i++) {
+ fast_return_value_void_is_null = i == 0;
+ value = TestFastReturnValues<void>();
+ if (fast_return_value_void_is_null) {
+ CHECK(value->IsNull());
+ } else {
+ CHECK(value->IsUndefined());
+ }
}
}
@@ -1440,8 +1677,8 @@ Handle<Value> EmptyInterceptorSetter(Local<String> name,
Handle<Value> InterceptorGetter(Local<String> name,
const AccessorInfo& info) {
// Intercept names that start with 'interceptor_'.
- String::AsciiValue ascii(name);
- char* name_str = *ascii;
+ String::Utf8Value utf8(name);
+ char* name_str = *utf8;
char prefix[] = "interceptor_";
int i;
for (i = 0; name_str[i] && prefix[i]; ++i) {
@@ -1701,7 +1938,7 @@ THREADED_TEST(NamedPropertyHandlerGetter) {
CHECK_EQ(echo_named_call_count, 1);
const char* code = "var str = 'oddle'; obj[str] + obj.poddle;";
v8::Handle<Value> str = CompileRun(code);
- String::AsciiValue value(str);
+ String::Utf8Value value(str);
CHECK_EQ(*value, "oddlepoddle");
// Check default behavior
CHECK_EQ(v8_compile("obj.flob = 10;")->Run()->Int32Value(), 10);
@@ -2539,6 +2776,30 @@ THREADED_TEST(ResettingGlobalHandleToEmpty) {
}
+THREADED_TEST(ClearAndLeakGlobal) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::internal::GlobalHandles* global_handles = NULL;
+ int initial_handle_count = 0;
+ v8::Persistent<String> global;
+ {
+ v8::HandleScope scope(isolate);
+ Local<String> str = v8_str("str");
+ global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ initial_handle_count = global_handles->NumberOfGlobalHandles();
+ global = v8::Persistent<String>::New(isolate, str);
+ }
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
+ String* str = global.ClearAndLeak();
+ CHECK(global.IsEmpty());
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
+ v8::Persistent<String>* new_global =
+ reinterpret_cast<v8::Persistent<String>*>(&str);
+ new_global->Dispose();
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count);
+}
+
+
THREADED_TEST(LocalHandle) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Local<String> local = v8::Local<String>::New(v8_str("str"));
@@ -3162,7 +3423,7 @@ THREADED_TEST(ScriptException) {
Local<Value> result = script->Run();
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value(try_catch.Exception());
+ String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ(*exception_value, "panama!");
}
@@ -3323,7 +3584,7 @@ THREADED_TEST(PropertyAttributes) {
CompileRun("({ toString: function() { throw 'exception';} })");
CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(exception));
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value(try_catch.Exception());
+ String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ("exception", *exception_value);
try_catch.Reset();
}
@@ -3614,7 +3875,7 @@ THREADED_TEST(ConstructCall) {
static void CheckUncle(v8::TryCatch* try_catch) {
CHECK(try_catch->HasCaught());
- String::AsciiValue str_value(try_catch->Exception());
+ String::Utf8Value str_value(try_catch->Exception());
CHECK_EQ(*str_value, "uncle?");
try_catch->Reset();
}
@@ -3992,7 +4253,7 @@ THREADED_TEST(ExternalScriptException) {
Local<Value> result = script->Run();
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value(try_catch.Exception());
+ String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ("konto", *exception_value);
}
@@ -4364,7 +4625,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
v8::TryCatch try_catch;
result = script_define->Run();
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value(try_catch.Exception());
+ String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
}
@@ -4409,7 +4670,7 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
v8::TryCatch try_catch;
result = script_define->Run();
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value(try_catch.Exception());
+ String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
}
@@ -4527,7 +4788,7 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
CompileRun("Object.defineProperty(obj1, 'x',"
"{get: function() { return 'func'; }})");
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value(try_catch.Exception());
+ String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
}
{
@@ -4535,7 +4796,7 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
CompileRun("Object.defineProperty(obj2, 'x',"
"{get: function() { return 'func'; }})");
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value(try_catch.Exception());
+ String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
}
}
@@ -4625,7 +4886,10 @@ THREADED_TEST(SetterOnly) {
THREADED_TEST(NoAccessors) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
Local<ObjectTemplate> templ = ObjectTemplate::New();
- templ->SetAccessor(v8_str("x"), NULL, NULL, v8_str("donut"));
+ templ->SetAccessor(v8_str("x"),
+ static_cast<v8::AccessorGetter>(NULL),
+ NULL,
+ v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
Local<Script> script = Script::Compile(v8_str("obj.x = 4; obj.x"));
@@ -5284,8 +5548,7 @@ THREADED_TEST(UndetectableObject) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<v8::FunctionTemplate> desc =
- v8::FunctionTemplate::New(0, v8::Handle<Value>());
+ Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
Local<v8::Object> obj = desc->GetFunction()->NewInstance();
@@ -5328,8 +5591,7 @@ THREADED_TEST(VoidLiteral) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<v8::FunctionTemplate> desc =
- v8::FunctionTemplate::New(0, v8::Handle<Value>());
+ Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
Local<v8::Object> obj = desc->GetFunction()->NewInstance();
@@ -5372,8 +5634,7 @@ THREADED_TEST(ExtensibleOnUndetectable) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<v8::FunctionTemplate> desc =
- v8::FunctionTemplate::New(0, v8::Handle<Value>());
+ Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
Local<v8::Object> obj = desc->GetFunction()->NewInstance();
@@ -7162,10 +7423,10 @@ static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
v8::Handle<Value>) {
v8::Handle<v8::Value> name_val = message->GetScriptResourceName();
CHECK(!name_val.IsEmpty() && name_val->IsString());
- v8::String::AsciiValue name(message->GetScriptResourceName());
+ v8::String::Utf8Value name(message->GetScriptResourceName());
CHECK_EQ(script_resource_name, *name);
CHECK_EQ(3, message->GetLineNumber());
- v8::String::AsciiValue source_line(message->GetSourceLine());
+ v8::String::Utf8Value source_line(message->GetSourceLine());
CHECK_EQ(" new o.foo();", *source_line);
}
@@ -9024,7 +9285,7 @@ THREADED_TEST(ConstructorForObject) {
"(function() { var o = new obj('tipli'); return o.a; })()");
CHECK(!try_catch.HasCaught());
CHECK(value->IsString());
- String::AsciiValue string_value1(value->ToString());
+ String::Utf8Value string_value1(value->ToString());
CHECK_EQ("tipli", *string_value1);
Local<Value> args2[] = { v8_str("tipli") };
@@ -9034,7 +9295,7 @@ THREADED_TEST(ConstructorForObject) {
value = object2->Get(v8_str("a"));
CHECK(!try_catch.HasCaught());
CHECK(value->IsString());
- String::AsciiValue string_value2(value->ToString());
+ String::Utf8Value string_value2(value->ToString());
CHECK_EQ("tipli", *string_value2);
// Call the Object's constructor with a Boolean.
@@ -9081,14 +9342,14 @@ THREADED_TEST(ConstructorForObject) {
value = CompileRun("new obj2(28)");
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value1(try_catch.Exception());
+ String::Utf8Value exception_value1(try_catch.Exception());
CHECK_EQ("TypeError: object is not a function", *exception_value1);
try_catch.Reset();
Local<Value> args[] = { v8_num(29) };
value = instance->CallAsConstructor(1, args);
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value2(try_catch.Exception());
+ String::Utf8Value exception_value2(try_catch.Exception());
CHECK_EQ("TypeError: #<Object> is not a function", *exception_value2);
try_catch.Reset();
}
@@ -9104,14 +9365,14 @@ THREADED_TEST(ConstructorForObject) {
value = CompileRun("new obj3(22)");
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value1(try_catch.Exception());
+ String::Utf8Value exception_value1(try_catch.Exception());
CHECK_EQ("22", *exception_value1);
try_catch.Reset();
Local<Value> args[] = { v8_num(23) };
value = instance->CallAsConstructor(1, args);
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value2(try_catch.Exception());
+ String::Utf8Value exception_value2(try_catch.Exception());
CHECK_EQ("23", *exception_value2);
try_catch.Reset();
}
@@ -9441,7 +9702,7 @@ THREADED_TEST(CallAsFunction) {
value = CompileRun("obj2(28)");
CHECK(value.IsEmpty());
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value1(try_catch.Exception());
+ String::Utf8Value exception_value1(try_catch.Exception());
CHECK_EQ("TypeError: Property 'obj2' of object #<Object> is not a function",
*exception_value1);
try_catch.Reset();
@@ -9452,7 +9713,7 @@ THREADED_TEST(CallAsFunction) {
value = instance->CallAsFunction(instance, 1, args);
CHECK(value.IsEmpty());
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value2(try_catch.Exception());
+ String::Utf8Value exception_value2(try_catch.Exception());
CHECK_EQ("TypeError: [object Object] is not a function", *exception_value2);
try_catch.Reset();
}
@@ -9469,14 +9730,14 @@ THREADED_TEST(CallAsFunction) {
// Catch the exception which is thrown by call-as-function handler
value = CompileRun("obj3(22)");
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value1(try_catch.Exception());
+ String::Utf8Value exception_value1(try_catch.Exception());
CHECK_EQ("22", *exception_value1);
try_catch.Reset();
v8::Handle<Value> args[] = { v8_num(23) };
value = instance->CallAsFunction(instance, 1, args);
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value2(try_catch.Exception());
+ String::Utf8Value exception_value2(try_catch.Exception());
CHECK_EQ("23", *exception_value2);
try_catch.Reset();
}
@@ -10374,6 +10635,7 @@ THREADED_TEST(InterceptorCallICCachedFromGlobal) {
static v8::Handle<Value> InterceptorCallICFastApi(Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info);
int* call_count =
reinterpret_cast<int*>(v8::External::Cast(*info.Data())->Value());
++(*call_count);
@@ -10386,6 +10648,7 @@ static v8::Handle<Value> InterceptorCallICFastApi(Local<String> name,
static v8::Handle<Value> FastApiCallback_TrivialSignature(
const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
+ CheckReturnValue(args);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
CHECK_EQ(isolate, args.GetIsolate());
CHECK_EQ(args.This(), args.Holder());
@@ -10396,6 +10659,7 @@ static v8::Handle<Value> FastApiCallback_TrivialSignature(
static v8::Handle<Value> FastApiCallback_SimpleSignature(
const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
+ CheckReturnValue(args);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
CHECK_EQ(isolate, args.GetIsolate());
CHECK_EQ(args.This()->GetPrototype(), args.Holder());
@@ -10473,29 +10737,59 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) {
}
-v8::Handle<v8::Value> DirectGetterCallback(Local<String> name,
- const v8::AccessorInfo& info) {
+static Handle<Value> DoDirectGetter() {
if (++p_getter_count % 3 == 0) {
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
GenerateSomeGarbage();
}
+ return v8_str("Direct Getter Result");
+}
+
+static v8::Handle<v8::Value> DirectGetter(Local<String> name,
+ const v8::AccessorInfo& info) {
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(v8_str("Garbage"));
+ return DoDirectGetter();
+}
+
+static v8::Handle<v8::Value> DirectGetterIndirect(
+ Local<String> name,
+ const v8::AccessorInfo& info) {
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(DoDirectGetter());
return v8::Handle<v8::Value>();
}
+static void DirectGetterCallback(
+ Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info);
+ info.GetReturnValue().Set(DoDirectGetter());
+}
-THREADED_TEST(LoadICFastApi_DirectCall_GCMoveStub) {
+
+template<typename Accessor>
+static void LoadICFastApi_DirectCall_GCMoveStub(Accessor accessor) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New();
- obj->SetAccessor(v8_str("p1"), DirectGetterCallback);
+ obj->SetAccessor(v8_str("p1"), accessor);
context->Global()->Set(v8_str("o1"), obj->NewInstance());
p_getter_count = 0;
- CompileRun(
+ v8::Handle<v8::Value> result = CompileRun(
"function f() {"
" for (var i = 0; i < 30; i++) o1.p1;"
+ " return o1.p1"
"}"
"f();");
- CHECK_EQ(30, p_getter_count);
+ CHECK_EQ(v8_str("Direct Getter Result"), result);
+ CHECK_EQ(31, p_getter_count);
+}
+
+THREADED_TEST(LoadICFastApi_DirectCall_GCMoveStub) {
+ LoadICFastApi_DirectCall_GCMoveStub(DirectGetterIndirect);
+ LoadICFastApi_DirectCall_GCMoveStub(DirectGetterCallback);
+ LoadICFastApi_DirectCall_GCMoveStub(DirectGetter);
}
@@ -11174,7 +11468,7 @@ THREADED_TEST(InterceptorICSetterExceptions) {
THREADED_TEST(NullNamedInterceptor) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
- templ->SetNamedPropertyHandler(0);
+ templ->SetNamedPropertyHandler(static_cast<v8::NamedPropertyGetter>(0));
LocalContext context;
templ->Set("x", v8_num(42));
v8::Handle<v8::Object> obj = templ->NewInstance();
@@ -11189,7 +11483,7 @@ THREADED_TEST(NullNamedInterceptor) {
THREADED_TEST(NullIndexedInterceptor) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
- templ->SetIndexedPropertyHandler(0);
+ templ->SetIndexedPropertyHandler(static_cast<v8::IndexedPropertyGetter>(0));
LocalContext context;
templ->Set("42", v8_num(42));
v8::Handle<v8::Object> obj = templ->NewInstance();
@@ -12057,6 +12351,13 @@ static void RunLoopInNewEnv() {
TEST(SetFunctionEntryHook) {
+ // FunctionEntryHook does not work well with experimental natives.
+ // Experimental natives are compiled during snapshot deserialization.
+ // This test breaks because InstallGetter (function from snapshot that
+ // only gets called from experimental natives) is compiled with entry hooks.
+ i::FLAG_harmony_typed_arrays = false;
+ i::FLAG_harmony_array_buffer = false;
+
i::FLAG_allow_natives_syntax = true;
i::FLAG_use_inlining = false;
@@ -12426,9 +12727,9 @@ static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script,
CHECK_EQ(92, message->GetEndPosition());
CHECK_EQ(2, message->GetStartColumn());
CHECK_EQ(3, message->GetEndColumn());
- v8::String::AsciiValue line(message->GetSourceLine());
+ v8::String::Utf8Value line(message->GetSourceLine());
CHECK_EQ(" throw 'nirk';", *line);
- v8::String::AsciiValue name(message->GetScriptResourceName());
+ v8::String::Utf8Value name(message->GetScriptResourceName());
CHECK_EQ(resource_name, *name);
}
@@ -12500,7 +12801,7 @@ THREADED_TEST(CallbackFunctionName) {
context->Global()->Set(v8_str("obj"), t->NewInstance());
v8::Handle<v8::Value> value = CompileRun("obj.asdf.name");
CHECK(value->IsString());
- v8::String::AsciiValue name(value);
+ v8::String::Utf8Value name(value);
CHECK_EQ("asdf", *name);
}
@@ -12972,7 +13273,7 @@ TEST(PreCompileInvalidPreparseDataError) {
Local<String> source = String::New(script);
Local<Script> compiled_script = Script::New(source, NULL, sd);
CHECK(try_catch.HasCaught());
- String::AsciiValue exception_value(try_catch.Message()->Get());
+ String::Utf8Value exception_value(try_catch.Message()->Get());
CHECK_EQ("Uncaught SyntaxError: Invalid preparser data for function bar",
*exception_value);
@@ -15852,9 +16153,12 @@ static uint32_t* ComputeStackLimit(uint32_t size) {
}
+// We need at least 165kB for an x64 debug build with clang and ASAN.
+static const int stack_breathing_room = 256 * i::KB;
+
+
TEST(SetResourceConstraints) {
- static const int K = 1024;
- uint32_t* set_limit = ComputeStackLimit(128 * K);
+ uint32_t* set_limit = ComputeStackLimit(stack_breathing_room);
// Set stack limit.
v8::ResourceConstraints constraints;
@@ -15878,8 +16182,7 @@ TEST(SetResourceConstraintsInThread) {
uint32_t* set_limit;
{
v8::Locker locker(CcTest::default_isolate());
- static const int K = 1024;
- set_limit = ComputeStackLimit(128 * K);
+ set_limit = ComputeStackLimit(stack_breathing_room);
// Set stack limit.
v8::ResourceConstraints constraints;
@@ -16241,11 +16544,11 @@ THREADED_TEST(ScriptOrigin) {
env->Global()->Get(v8::String::New("g")));
v8::ScriptOrigin script_origin_f = f->GetScriptOrigin();
- CHECK_EQ("test", *v8::String::AsciiValue(script_origin_f.ResourceName()));
+ CHECK_EQ("test", *v8::String::Utf8Value(script_origin_f.ResourceName()));
CHECK_EQ(0, script_origin_f.ResourceLineOffset()->Int32Value());
v8::ScriptOrigin script_origin_g = g->GetScriptOrigin();
- CHECK_EQ("test", *v8::String::AsciiValue(script_origin_g.ResourceName()));
+ CHECK_EQ("test", *v8::String::Utf8Value(script_origin_g.ResourceName()));
CHECK_EQ(0, script_origin_g.ResourceLineOffset()->Int32Value());
}
@@ -16258,7 +16561,7 @@ THREADED_TEST(FunctionGetInferredName) {
v8::Script::Compile(script, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("f")));
- CHECK_EQ("foo.bar.baz", *v8::String::AsciiValue(f->GetInferredName()));
+ CHECK_EQ("foo.bar.baz", *v8::String::Utf8Value(f->GetInferredName()));
}
THREADED_TEST(ScriptLineNumber) {
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 8cce08465e..9acb90ab22 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -232,6 +232,7 @@ TEST(4) {
double g;
double h;
int i;
+ double j;
double m;
double n;
float x;
@@ -294,6 +295,12 @@ TEST(4) {
__ vcvt_f64_s32(d4, s31);
__ vstr(d4, r4, OFFSET_OF(T, f));
+ // Convert from fixed point to floating point.
+ __ mov(lr, Operand(1234));
+ __ vmov(s8, lr);
+ __ vcvt_f64_s32(d4, 1);
+ __ vstr(d4, r4, OFFSET_OF(T, j));
+
// Test vabs.
__ vldr(d1, r4, OFFSET_OF(T, g));
__ vabs(d0, d1);
@@ -332,6 +339,7 @@ TEST(4) {
t.g = -2718.2818;
t.h = 31415926.5;
t.i = 0;
+ t.j = 0;
t.m = -2718.2818;
t.n = 123.456;
t.x = 4.5;
@@ -345,6 +353,7 @@ TEST(4) {
CHECK_EQ(2, t.i);
CHECK_EQ(2718.2818, t.g);
CHECK_EQ(31415926.5, t.h);
+ CHECK_EQ(617.0, t.j);
CHECK_EQ(42.0, t.f);
CHECK_EQ(1.0, t.e);
CHECK_EQ(1.000000059604644775390625, t.d);
diff --git a/deps/v8/test/cctest/test-compare-nil-ic-stub.cc b/deps/v8/test/cctest/test-compare-nil-ic-stub.cc
new file mode 100644
index 0000000000..6177fde166
--- /dev/null
+++ b/deps/v8/test/cctest/test-compare-nil-ic-stub.cc
@@ -0,0 +1,86 @@
+// Copyright 2006-2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+#include "cctest.h"
+#include "code-stubs.h"
+
+
+using namespace v8::internal;
+
+#define Types CompareNilICStub::Types
+
+TEST(TypeConstructors) {
+ Types types;
+ types.Add(CompareNilICStub::MONOMORPHIC_MAP);
+ Types types2(types);
+ CHECK_EQ(types.ToIntegral(), types2.ToIntegral());
+}
+
+TEST(ExternalICStateParsing) {
+ Types types;
+ types.Add(CompareNilICStub::UNDEFINED);
+ CompareNilICStub stub(kNonStrictEquality, kUndefinedValue, types);
+ CompareNilICStub stub2(stub.GetExtraICState());
+ CHECK_EQ(stub.GetKind(), stub2.GetKind());
+ CHECK_EQ(stub.GetNilValue(), stub2.GetNilValue());
+ CHECK_EQ(stub.GetTypes().ToIntegral(), stub2.GetTypes().ToIntegral());
+}
+
+TEST(SettingTypes) {
+ Types state;
+ CHECK(state.IsEmpty());
+ state.Add(CompareNilICStub::NULL_TYPE);
+ CHECK(!state.IsEmpty());
+ CHECK(state.Contains(CompareNilICStub::NULL_TYPE));
+ CHECK(!state.Contains(CompareNilICStub::UNDEFINED));
+ CHECK(!state.Contains(CompareNilICStub::UNDETECTABLE));
+ state.Add(CompareNilICStub::UNDEFINED);
+ CHECK(state.Contains(CompareNilICStub::UNDEFINED));
+ CHECK(state.Contains(CompareNilICStub::NULL_TYPE));
+ CHECK(!state.Contains(CompareNilICStub::UNDETECTABLE));
+}
+
+TEST(ClearTypes) {
+ Types state;
+ state.Add(CompareNilICStub::NULL_TYPE);
+ state.RemoveAll();
+ CHECK(state.IsEmpty());
+}
+
+TEST(FullCompare) {
+ Types state;
+ CHECK(Types::FullCompare() != state);
+ state.Add(CompareNilICStub::UNDEFINED);
+ CHECK(state != Types::FullCompare());
+ state.Add(CompareNilICStub::NULL_TYPE);
+ CHECK(state != Types::FullCompare());
+ state.Add(CompareNilICStub::UNDETECTABLE);
+ CHECK(state == Types::FullCompare());
+}
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index 651dc59d58..cf2092e4d3 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -249,6 +249,7 @@ TEST(ExponentNumberStr) {
CHECK_EQ(1e-106, StringToDouble(&uc, ".000001e-100", NO_FLAGS));
}
+
class OneBit1: public BitField<uint32_t, 0, 1> {};
class OneBit2: public BitField<uint32_t, 7, 1> {};
class EightBit1: public BitField<uint32_t, 0, 8> {};
@@ -286,3 +287,21 @@ TEST(BitField) {
CHECK(!EightBit1::is_valid(256));
CHECK(!EightBit2::is_valid(256));
}
+
+
+class UpperBits: public BitField64<int, 61, 3> {};
+class MiddleBits: public BitField64<int, 31, 2> {};
+
+TEST(BitField64) {
+ uint64_t x;
+
+ // Test most significant bits.
+ x = V8_2PART_UINT64_C(0xE0000000, 00000000);
+ CHECK(x == UpperBits::encode(7));
+ CHECK_EQ(7, UpperBits::decode(x));
+
+ // Test the 32/64-bit boundary bits.
+ x = V8_2PART_UINT64_C(0x00000001, 80000000);
+ CHECK(x == MiddleBits::encode(3));
+ CHECK_EQ(3, MiddleBits::decode(x));
+}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index d73be18905..22af9e75b3 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -122,7 +122,8 @@ TEST(CodeEvents) {
0,
ToAddress(0x1000),
0x100,
- ToAddress(0x10000));
+ ToAddress(0x10000),
+ NULL);
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
"bbb",
ToAddress(0x1200),
@@ -549,3 +550,74 @@ TEST(CollectCpuProfile) {
cpu_profiler->DeleteAllCpuProfiles();
}
+
+
+
+static const char* cpu_profiler_test_source2 = "function loop() {}\n"
+"function delay() { loop(); }\n"
+"function start(count) {\n"
+" var k = 0;\n"
+" do {\n"
+" delay();\n"
+" } while (++k < count*100*1000);\n"
+"}\n";
+
+// Check that the profile tree doesn't contain unexpected traces:
+// - 'loop' can be called only by 'delay'
+// - 'delay' may be called only by 'start'
+// The profile will look like the following:
+//
+// [Top down]:
+// 135 0 (root) [-1] #1
+// 121 72 start [-1] #3
+// 49 33 delay [-1] #4
+// 16 16 loop [-1] #5
+// 14 14 (program) [-1] #2
+TEST(SampleWhenFrameIsNotSetup) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ v8::Script::Compile(v8::String::New(cpu_profiler_test_source2))->Run();
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("start")));
+
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+ cpu_profiler->StartCpuProfiling(profile_name);
+ int32_t repeat_count = 100;
+#if defined(USE_SIMULATOR)
+ // Simulators are much slower.
+ repeat_count = 1;
+#endif
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ function->Call(env->Global(), ARRAY_SIZE(args), args);
+ const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+ CHECK_NE(NULL, profile);
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(
+ const_cast<v8::CpuProfile*>(profile))->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+
+ ScopedVector<v8::Handle<v8::String> > names(3);
+ names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::New("start");
+ CheckChildrenNames(root, names);
+
+ const v8::CpuProfileNode* startNode = FindChild(root, "start");
+  // On slow machines there may be no meaningful samples at all, skip the
+ // check there.
+ if (startNode && startNode->GetChildrenCount() > 0) {
+ CHECK_EQ(1, startNode->GetChildrenCount());
+ const v8::CpuProfileNode* delayNode = FindChild(startNode, "delay");
+ if (delayNode->GetChildrenCount() > 0) {
+ CHECK_EQ(1, delayNode->GetChildrenCount());
+ FindChild(delayNode, "loop");
+ }
+ }
+
+ cpu_profiler->DeleteAllCpuProfiles();
+}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 1afe8901fa..c4df73ebbd 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -850,8 +850,8 @@ static void DebugEventEvaluate(v8::DebugEvent event,
v8::Handle<v8::Value> result =
evaluate_check_function->Call(exec_state, argc, argv);
if (!result->IsTrue()) {
- v8::String::AsciiValue ascii(checks[i].expected->ToString());
- V8_Fatal(__FILE__, __LINE__, "%s != %s", checks[i].expr, *ascii);
+ v8::String::Utf8Value utf8(checks[i].expected->ToString());
+ V8_Fatal(__FILE__, __LINE__, "%s != %s", checks[i].expr, *utf8);
}
}
}
@@ -923,7 +923,7 @@ static void DebugEventStepSequence(v8::DebugEvent event,
v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
argc, argv);
CHECK(result->IsString());
- v8::String::AsciiValue function_name(result->ToString());
+ v8::String::Utf8Value function_name(result->ToString());
CHECK_EQ(1, StrLength(*function_name));
CHECK_EQ((*function_name)[0],
expected_step_sequence[break_point_hit_count]);
@@ -4285,7 +4285,7 @@ static v8::Handle<v8::Array> IndexedEnum(const v8::AccessorInfo&) {
static v8::Handle<v8::Value> NamedGetter(v8::Local<v8::String> name,
const v8::AccessorInfo& info) {
- v8::String::AsciiValue n(name);
+ v8::String::Utf8Value n(name);
if (strcmp(*n, "a") == 0) {
return v8::String::New("AA");
} else if (strcmp(*n, "b") == 0) {
@@ -7008,7 +7008,7 @@ v8::Handle<v8::Context> debugger_context;
static v8::Handle<v8::Value> NamedGetterWithCallingContextCheck(
v8::Local<v8::String> name,
const v8::AccessorInfo& info) {
- CHECK_EQ(0, strcmp(*v8::String::AsciiValue(name), "a"));
+ CHECK_EQ(0, strcmp(*v8::String::Utf8Value(name), "a"));
v8::Handle<v8::Context> current = v8::Context::GetCurrent();
CHECK(current == debugee_context);
CHECK(current != debugger_context);
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 620f6fe1a7..dfc27548b5 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -376,8 +376,8 @@ TEST(DeoptimizeBinaryOperationADDString) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
CHECK(result->IsString());
- v8::String::AsciiValue ascii(result);
- CHECK_EQ("a+an X", *ascii);
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("a+an X", *utf8);
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
}
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 8cba75b8da..84f0d8630f 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -578,6 +578,8 @@ TEST(Vfp) {
"eeb80be0 vcvt.f64.s32 d0, s1");
COMPARE(vcvt_f32_s32(s0, s2),
"eeb80ac1 vcvt.f32.s32 s0, s2");
+ COMPARE(vcvt_f64_s32(d0, 1),
+ "eeba0bef vcvt.f64.s32 d0, d0, #1");
if (CpuFeatures::IsSupported(VFP32DREGS)) {
COMPARE(vmov(d3, d27),
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index b2c9b7220e..595a2069d2 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -108,7 +108,7 @@ static const v8::HeapGraphNode* GetProperty(const v8::HeapGraphNode* node,
const char* name) {
for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = node->GetChild(i);
- v8::String::AsciiValue prop_name(prop->GetName());
+ v8::String::Utf8Value prop_name(prop->GetName());
if (prop->GetType() == type && strcmp(name, *prop_name) == 0)
return prop->GetToNode();
}
@@ -121,7 +121,7 @@ static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
const v8::HeapGraphEdge* prop = node->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
if (node->GetType() == v8::HeapGraphNode::kString) {
- v8::String::AsciiValue node_name(node->GetName());
+ v8::String::Utf8Value node_name(node->GetName());
if (strcmp(contents, *node_name) == 0) return true;
}
}
@@ -285,7 +285,7 @@ TEST(HeapSnapshotCodeObjects) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "anonymous");
CHECK_NE(NULL, anonymous);
CHECK_EQ(v8::HeapGraphNode::kClosure, anonymous->GetType());
- v8::String::AsciiValue anonymous_name(anonymous->GetName());
+ v8::String::Utf8Value anonymous_name(anonymous->GetName());
CHECK_EQ("", *anonymous_name);
// Find references to code.
@@ -1079,16 +1079,16 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
uint16_t class_id, v8::Handle<v8::Value> wrapper) {
if (class_id == 1) {
if (wrapper->IsString()) {
- v8::String::AsciiValue ascii(wrapper);
- if (strcmp(*ascii, "AAA") == 0)
+ v8::String::Utf8Value utf8(wrapper);
+ if (strcmp(*utf8, "AAA") == 0)
return new TestRetainedObjectInfo(1, "aaa-group", "aaa", 100);
- else if (strcmp(*ascii, "BBB") == 0)
+ else if (strcmp(*utf8, "BBB") == 0)
return new TestRetainedObjectInfo(1, "aaa-group", "aaa", 100);
}
} else if (class_id == 2) {
if (wrapper->IsString()) {
- v8::String::AsciiValue ascii(wrapper);
- if (strcmp(*ascii, "CCC") == 0)
+ v8::String::Utf8Value utf8(wrapper);
+ if (strcmp(*utf8, "CCC") == 0)
return new TestRetainedObjectInfo(2, "ccc-group", "ccc");
}
}
@@ -1254,7 +1254,7 @@ TEST(HeapSnapshotImplicitReferences) {
int implicit_targets_count = 0;
for (int i = 0, count = obj1->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = obj1->GetChild(i);
- v8::String::AsciiValue prop_name(prop->GetName());
+ v8::String::Utf8Value prop_name(prop->GetName());
if (prop->GetType() == v8::HeapGraphEdge::kInternal &&
strcmp("native", *prop_name) == 0) {
++implicit_targets_count;
@@ -1692,7 +1692,7 @@ TEST(AllStrongGcRootsHaveNames) {
for (int i = 0; i < strong_roots->GetChildrenCount(); ++i) {
const v8::HeapGraphEdge* edge = strong_roots->GetChild(i);
CHECK_EQ(v8::HeapGraphEdge::kInternal, edge->GetType());
- v8::String::AsciiValue name(edge->GetName());
+ v8::String::Utf8Value name(edge->GetName());
CHECK(isalpha(**name));
}
}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 0711454db3..ca173c25a5 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -2805,6 +2805,13 @@ TEST(Regress169209) {
i::FLAG_stress_compaction = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
+
+ // Experimental natives are compiled during snapshot deserialization.
+ // This test breaks because heap layout changes in a way that closure
+ // is visited before shared function info.
+ i::FLAG_harmony_typed_arrays = false;
+ i::FLAG_harmony_array_buffer = false;
+
CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index ca0f073133..5977f095c6 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -648,7 +648,7 @@ TEST(Regress1433) {
v8::Handle<String> source = v8::String::New("1+1");
v8::Handle<Script> script = v8::Script::Compile(source);
v8::Handle<Value> result = script->Run();
- v8::String::AsciiValue ascii(result);
+ v8::String::Utf8Value utf8(result);
}
isolate->Dispose();
}
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 2cb4646d5a..dc21ac2e3c 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -467,10 +467,17 @@ TEST(EmptyObjectGroups) {
}
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define V8_WITH_ASAN 1
+#endif
+#endif
+
+
// Here is a memory use test that uses /proc, and is therefore Linux-only. We
// do not care how much memory the simulator uses, since it is only there for
-// debugging purposes.
-#if defined(__linux__) && !defined(USE_SIMULATOR)
+// debugging purposes. Testing with ASAN doesn't make sense, either.
+#if defined(__linux__) && !defined(USE_SIMULATOR) && !defined(V8_WITH_ASAN)
static uintptr_t ReadLong(char* buffer, intptr_t* position, int base) {
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 05fea0bbb6..170ec76a14 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -388,8 +388,7 @@ TEST(PreParseOverflow) {
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
size_t kProgramSize = 1024 * 1024;
- i::SmartArrayPointer<char> program(
- reinterpret_cast<char*>(malloc(kProgramSize + 1)));
+ i::SmartArrayPointer<char> program(i::NewArray<char>(kProgramSize + 1));
memset(*program, '(', kProgramSize);
program[kProgramSize] = '\0';
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index 5c7ae13094..d718993214 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -28,10 +28,6 @@
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
// Flags: --track-allocation-sites --noalways-opt
-// TODO(mvstanton): remove --nooptimize-constructed-arrays and enable
-// the constructed array code below when the feature is turned on
-// by default.
-
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
// time sticks if built with snapshot. If --smi-only-arrays is deactivated
diff --git a/deps/v8/test/mjsunit/compiler/alloc-object.js b/deps/v8/test/mjsunit/compiler/alloc-object.js
index 1d44efb549..0e593a49b3 100644
--- a/deps/v8/test/mjsunit/compiler/alloc-object.js
+++ b/deps/v8/test/mjsunit/compiler/alloc-object.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc --inline-construct
+// Flags: --allow-natives-syntax --inline-construct
// Test that inlined object allocation works for different layouts of
// objects (e.g. in object properties, slack tracking in progress or
@@ -53,7 +53,7 @@ function test(construct) {
assertEquals(5, o.y);
assertEquals(6, o.z);
%DeoptimizeFunction(test_helper);
- gc(); // Makes V8 forget about type information for test_helper.
+ %ClearFunctionTypeFeedback(test_helper);
}
function finalize_slack_tracking(construct) {
diff --git a/deps/v8/test/mjsunit/compiler/dead-code.js b/deps/v8/test/mjsunit/compiler/dead-code.js
new file mode 100644
index 0000000000..8b5bd2cf90
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dead-code.js
@@ -0,0 +1,79 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function dead1(a, b) {
+ var x = a + b;
+ return a; // x is dead
+}
+
+function dead2(a, b) {
+ var x = a | 0;
+ var y = b | 0;
+ return a; // x and y are both dead
+}
+
+function dead3(a, b) {
+ var z;
+ if(a == 2) z = a;
+ else z = b;
+ return a; // z is dead
+}
+
+function dead4(a) {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ z++;
+ }
+ return a; // z is dead
+}
+
+function dead5(a) {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ z++;
+ }
+ var w = z + a;
+ return a; // z is dead
+}
+
+assertTrue(dead1(33, 32) == 33);
+assertTrue(dead2(33, 32) == 33);
+assertTrue(dead3(33, 32) == 33);
+assertTrue(dead4(33) == 33);
+assertTrue(dead5(33) == 33);
+
+assertTrue(dead1(34, 7) == 34);
+assertTrue(dead2(34, 7) == 34);
+assertTrue(dead3(34, 7) == 34);
+assertTrue(dead4(34) == 34);
+assertTrue(dead5(34) == 34);
+
+assertTrue(dead1(3.4, 0.1) == 3.4);
+assertTrue(dead2(3.4, 0.1) == 3.4);
+assertTrue(dead3(3.4, 0.1) == 3.4);
+assertTrue(dead4(3.4) == 3.4);
+assertTrue(dead5(3.4) == 3.4);
diff --git a/deps/v8/test/mjsunit/compiler/dead-code2.js b/deps/v8/test/mjsunit/compiler/dead-code2.js
new file mode 100644
index 0000000000..b0580207ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dead-code2.js
@@ -0,0 +1,84 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function dead1(a, b) {
+ { var x = a + b; }
+ return a; // x is dead
+}
+
+function dead2(a, b) {
+ { var x = a | 0; var y = b | 0; }
+ return a; // x and y are both dead
+}
+
+function dead3(a, b) {
+ {
+ var z;
+ if(a == 2) z = a;
+ else z = b;
+ }
+ return a; // z is dead
+}
+
+function dead4(a) {
+ {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ z++;
+ }
+ }
+ return a; // z is dead
+}
+
+function dead5(a) {
+ {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ z++;
+ }
+ var w = z + a;
+ }
+ return a; // z and w are dead
+}
+
+assertTrue(dead1(33, 32) == 33);
+assertTrue(dead2(33, 32) == 33);
+assertTrue(dead3(33, 32) == 33);
+assertTrue(dead4(33) == 33);
+assertTrue(dead5(33) == 33);
+
+assertTrue(dead1(34, 7) == 34);
+assertTrue(dead2(34, 7) == 34);
+assertTrue(dead3(34, 7) == 34);
+assertTrue(dead4(34) == 34);
+assertTrue(dead5(34) == 34);
+
+assertTrue(dead1(3.4, 0.1) == 3.4);
+assertTrue(dead2(3.4, 0.1) == 3.4);
+assertTrue(dead3(3.4, 0.1) == 3.4);
+assertTrue(dead4(3.4) == 3.4);
+assertTrue(dead5(3.4) == 3.4);
diff --git a/deps/v8/test/mjsunit/compiler/dead-code3.js b/deps/v8/test/mjsunit/compiler/dead-code3.js
new file mode 100644
index 0000000000..d05797825b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dead-code3.js
@@ -0,0 +1,78 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function dead1(a, b) {
+ a + b; // dead
+ return a;
+}
+
+function dead2(a, b) {
+ a | 0; // dead
+ b | 0; // dead
+ return a; // x and y are both dead
+}
+
+function dead3(a, b) {
+ a == 2 ? a : b; // dead
+ return a;
+}
+
+function dead4(a) {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ z + 3; // dead
+ }
+ return a;
+}
+
+function dead5(a) {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ z + 3; // dead
+ z++;
+ }
+ var w = z + a;
+ return a; // z is dead
+}
+
+assertTrue(dead1(33, 32) == 33);
+assertTrue(dead2(33, 32) == 33);
+assertTrue(dead3(33, 32) == 33);
+assertTrue(dead4(33) == 33);
+assertTrue(dead5(33) == 33);
+
+assertTrue(dead1(34, 7) == 34);
+assertTrue(dead2(34, 7) == 34);
+assertTrue(dead3(34, 7) == 34);
+assertTrue(dead4(34) == 34);
+assertTrue(dead5(34) == 34);
+
+assertTrue(dead1(3.4, 0.1) == 3.4);
+assertTrue(dead2(3.4, 0.1) == 3.4);
+assertTrue(dead3(3.4, 0.1) == 3.4);
+assertTrue(dead4(3.4) == 3.4);
+assertTrue(dead5(3.4) == 3.4);
diff --git a/deps/v8/test/mjsunit/compiler/dead-code4.js b/deps/v8/test/mjsunit/compiler/dead-code4.js
new file mode 100644
index 0000000000..a5c20f81ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dead-code4.js
@@ -0,0 +1,78 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function dead1(a, b) {
+ (a | 0) + (b | 0); // dead
+ return a;
+}
+
+function dead2(a, b) {
+ a | 0; // dead
+ b | 0; // dead
+ return a; // x and y are both dead
+}
+
+function dead3(a, b) {
+ a == 2 ? (a | 0) : (b | 0); // dead
+ return a;
+}
+
+function dead4(a) {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ (z | 0) + 3; // dead
+ }
+ return a;
+}
+
+function dead5(a) {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ (z | 0) + 3; // dead
+ z++;
+ }
+ var w = z + a;
+ return a; // z is dead
+}
+
+assertTrue(dead1(33, 32) == 33);
+assertTrue(dead2(33, 32) == 33);
+assertTrue(dead3(33, 32) == 33);
+assertTrue(dead4(33) == 33);
+assertTrue(dead5(33) == 33);
+
+assertTrue(dead1(34, 7) == 34);
+assertTrue(dead2(34, 7) == 34);
+assertTrue(dead3(34, 7) == 34);
+assertTrue(dead4(34) == 34);
+assertTrue(dead5(34) == 34);
+
+assertTrue(dead1(3.4, 0.1) == 3.4);
+assertTrue(dead2(3.4, 0.1) == 3.4);
+assertTrue(dead3(3.4, 0.1) == 3.4);
+assertTrue(dead4(3.4) == 3.4);
+assertTrue(dead5(3.4) == 3.4);
diff --git a/deps/v8/test/mjsunit/compiler/dead-code5.js b/deps/v8/test/mjsunit/compiler/dead-code5.js
new file mode 100644
index 0000000000..834fa24f0e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dead-code5.js
@@ -0,0 +1,89 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function dead1(a, b) {
+ a * b;
+ a << b;
+ a >> b;
+ a >>> b;
+ a | b;
+ a & b;
+ a ^ b;
+ return a;
+}
+
+function dead2(a, b) {
+ (a | 0) * b;
+ (a | 0) << b;
+ (a | 0) >> b;
+ (a | 0) >>> b;
+ (a | 0) | b;
+ (a | 0) & b;
+ (a | 0) ^ b;
+ return a;
+}
+
+function dead3(a, b) {
+ a == 2 ? (a * b) : (b * a); // dead
+ return a;
+}
+
+function dead4(a) {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ z * 3; // dead
+ }
+ return a;
+}
+
+function dead5(a) {
+ var z = 3;
+ for (i = 0; i < 3; i++) {
+ z * 3; // dead
+ z++;
+ }
+ var w = z * a;
+ return a; // w is dead
+}
+
+assertTrue(dead1(33, 32) == 33);
+assertTrue(dead2(33, 32) == 33);
+assertTrue(dead3(33, 32) == 33);
+assertTrue(dead4(33) == 33);
+assertTrue(dead5(33) == 33);
+
+assertTrue(dead1(34, 7) == 34);
+assertTrue(dead2(34, 7) == 34);
+assertTrue(dead3(34, 7) == 34);
+assertTrue(dead4(34) == 34);
+assertTrue(dead5(34) == 34);
+
+assertTrue(dead1(3.4, 0.1) == 3.4);
+assertTrue(dead2(3.4, 0.1) == 3.4);
+assertTrue(dead3(3.4, 0.1) == 3.4);
+assertTrue(dead4(3.4) == 3.4);
+assertTrue(dead5(3.4) == 3.4);
diff --git a/deps/v8/test/mjsunit/compiler/dead-code6.js b/deps/v8/test/mjsunit/compiler/dead-code6.js
new file mode 100644
index 0000000000..ec9b8433dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dead-code6.js
@@ -0,0 +1,73 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// Test some dead code elimination scenarios
+
+function dead1(x, y) {
+ var a = x | 0, b = y | 0;
+ a * b;
+ a << b;
+ a >> b;
+ a >>> b;
+ a | b;
+ a & b;
+ a ^ b;
+ return x;
+}
+
+function dead2(x, y) {
+ var a = x | 0, b = y | 0;
+ (a | 0) * b;
+ (a | 0) << b;
+ (a | 0) >> b;
+ (a | 0) >>> b;
+ (a | 0) | b;
+ (a | 0) & b;
+ (a | 0) ^ b;
+ return x;
+}
+
+function dead3(a, b) {
+ a == 2 ? (a * b) : (b * a); // dead
+ return a;
+}
+
+assertTrue(dead1(33, 32) == 33);
+assertTrue(dead1(33, 32) == 33);
+%OptimizeFunctionOnNextCall(dead1);
+assertTrue(dead1(33, 32) == 33);
+
+assertTrue(dead2(34, 11) == 34);
+assertTrue(dead2(34, 11) == 34);
+%OptimizeFunctionOnNextCall(dead2);
+assertTrue(dead2(34, 11) == 34);
+
+assertTrue(dead3(35, 12) == 35);
+assertTrue(dead3(35, 12) == 35);
+%OptimizeFunctionOnNextCall(dead3);
+assertTrue(dead3(35, 12) == 35);
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index 6dbb4abebf..4c50e30d54 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -256,3 +256,12 @@ test(function stringCharAt() {
assertEquals("b", "abc".charAt(1.1));
assertEquals("", "abc".charAt(4.1));
});
+
+
+test(function int32Mod() {
+ assertEquals(-0, -2147483648 % (-1));
+});
+
+test(function int32Div() {
+ assertEquals(2147483648, -2147483648 / (-1));
+});
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index b9dbc075e9..afaa369042 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -60,7 +60,7 @@ for (i = 0; i < scripts.length; i++) {
}
// This has to be updated if the number of native scripts change.
-assertEquals(14, named_native_count);
+assertEquals(16, named_native_count);
// If no snapshot is used, only the 'gc' extension is loaded.
// If snapshot is used, all extensions are cached in the snapshot.
assertTrue(extension_count == 1 || extension_count == 5);
diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js
index e5f4f661bf..40b25cd582 100644
--- a/deps/v8/test/mjsunit/elements-transition-hoisting.js
+++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js
@@ -129,7 +129,7 @@ if (support_smi_only_arrays) {
// upon can hoisted, too.
function testExactMapHoisting3(a) {
var object = new Object();
- a.foo = 0;
+ a.foo = null;
a[0] = 0;
a[1] = 1;
var count = 3;
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-1.js b/deps/v8/test/mjsunit/elide-double-hole-check-1.js
new file mode 100644
index 0000000000..63569df294
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-1.js
@@ -0,0 +1,52 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f1(a, i) {
+ return a[i]
+}
+
+var a1 = [,,,,,,,,,,,,,,,,,,0.5];
+assertEquals(undefined, f1(a1, 1));
+assertEquals(undefined, f1(a1, 1));
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(undefined, f1(a1, 1));
+assertEquals(undefined, f1(a1, 1));
+
+function f2(a, i) {
+ return a[i] + 0.5;
+}
+var a2_b = [0.0,,];
+assertEquals(0.5, f2(a2_b, 0));
+assertEquals(0.5, f2(a2_b, 0));
+%OptimizeFunctionOnNextCall(f2);
+assertEquals(0.5, f2(a2_b, 0));
+assertEquals(NaN, f2(a2_b, 1));
+a2_b.__proto__ = [1.5,1.5,1.5];
+assertEquals(2, f2(a2_b, 1));
+assertEquals(0.5, f2(a2_b, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-2.js b/deps/v8/test/mjsunit/elide-double-hole-check-2.js
new file mode 100644
index 0000000000..978abc3bb0
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-2.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(a, i) {
+ return a[i] + 0.5;
+}
+var arr = [0.0,,];
+assertEquals(0.5, f(arr, 0));
+assertEquals(0.5, f(arr, 0));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(0.5, f(arr, 0));
+assertEquals(NaN, f(arr, 1));
+arr.__proto__ = [1.5,1.5,1.5];
+assertEquals(2, f(arr, 1));
+assertEquals(0.5, f(arr, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-3.js b/deps/v8/test/mjsunit/elide-double-hole-check-3.js
new file mode 100644
index 0000000000..f8179403ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-3.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(a, i) {
+ return a[i] + 0.5;
+}
+Array.prototype = [1.5,1.5,1.5];
+var arr = [0.0,,];
+assertEquals(0.5, f(arr, 0));
+assertEquals(0.5, f(arr, 0));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(0.5, f(arr, 0));
+assertEquals(NaN, f(arr, 1));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-4.js b/deps/v8/test/mjsunit/elide-double-hole-check-4.js
new file mode 100644
index 0000000000..e2a5505571
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-4.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f1(a, i) {
+ return a[i] + 0.5;
+}
+var arr = [0.0,,2.5];
+assertEquals(0.5, f1(arr, 0));
+assertEquals(0.5, f1(arr, 0));
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(0.5, f1(arr, 0));
+Array.prototype[1] = 1.5;
+assertEquals(2, f1(arr, 1));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-5.js b/deps/v8/test/mjsunit/elide-double-hole-check-5.js
new file mode 100644
index 0000000000..d0970c8fe1
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-5.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f1(a, i) {
+ return a[i] + 0.5;
+}
+var arr = [0.0,,2.5];
+assertEquals(0.5, f1(arr, 0));
+assertEquals(0.5, f1(arr, 0));
+Array.prototype[1] = 1.5;
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(2, f1(arr, 1));
+assertEquals(2, f1(arr, 1));
+assertEquals(0.5, f1(arr, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-6.js b/deps/v8/test/mjsunit/elide-double-hole-check-6.js
new file mode 100644
index 0000000000..01a8096f85
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-6.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f1(a, i) {
+ return a[i] + 0.5;
+}
+var arr = [0.0,,2.5];
+assertEquals(0.5, f1(arr, 0));
+assertEquals(0.5, f1(arr, 0));
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(0.5, f1(arr, 0));
+Array.prototype.__proto__[1] = 1.5;
+assertEquals(2, f1(arr, 1));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-7.js b/deps/v8/test/mjsunit/elide-double-hole-check-7.js
new file mode 100644
index 0000000000..2b13aff881
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-7.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f1(a, i) {
+ return a[i] + 0.5;
+}
+var arr = [0.0,,2.5];
+assertEquals(0.5, f1(arr, 0));
+assertEquals(0.5, f1(arr, 0));
+Array.prototype.__proto__[1] = 1.5;
+assertEquals(2, f1(arr, 1));
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(2, f1(arr, 1));
+assertEquals(0.5, f1(arr, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-8.js b/deps/v8/test/mjsunit/elide-double-hole-check-8.js
new file mode 100644
index 0000000000..35cc91fa8e
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-8.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f1(a, i) {
+ return a[i] + 0.5;
+}
+var arr = [0.0,,2.5];
+assertEquals(0.5, f1(arr, 0));
+assertEquals(0.5, f1(arr, 0));
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(0.5, f1(arr, 0));
+Array.prototype.__proto__ = new Object();
+Array.prototype.__proto__[1] = 1.5;
+assertEquals(2, f1(arr, 1));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-9.js b/deps/v8/test/mjsunit/elide-double-hole-check-9.js
new file mode 100644
index 0000000000..4d277af695
--- /dev/null
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-9.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var do_set = false;
+
+function set_proto_elements() {
+ try {} catch (e) {} // Don't optimize or inline
+ if (do_set) Array.prototype[1] = 1.5;
+}
+
+function f(a, i) {
+ set_proto_elements();
+ return a[i] + 0.5;
+}
+
+var arr = [0.0,,2.5];
+assertEquals(0.5, f(arr, 0));
+assertEquals(0.5, f(arr, 0));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(0.5, f(arr, 0));
+do_set = true;
+assertEquals(2, f(arr, 1));
+
diff --git a/deps/v8/test/mjsunit/external-array-no-sse2.js b/deps/v8/test/mjsunit/external-array-no-sse2.js
index b3d91a534c..c9d56217c8 100644
--- a/deps/v8/test/mjsunit/external-array-no-sse2.js
+++ b/deps/v8/test/mjsunit/external-array-no-sse2.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc --noenable-sse2
+// Flags: --allow-natives-syntax --noenable-sse2
// Helper
function assertInstance(o, f) {
@@ -301,7 +301,7 @@ function run_test(test_func, array, expected_result) {
}
assertEquals(expected_result, sum);
%DeoptimizeFunction(test_func);
- gc(); // Makes V8 forget about type information for test_func.
+ %ClearFunctionTypeFeedback(test_func);
}
function run_bounds_test(test_func, array, expected_result) {
@@ -350,8 +350,7 @@ for (var t = 0; t < types.length; t++) {
%OptimizeFunctionOnNextCall(run_bounds_test);
run_bounds_test(a);
%DeoptimizeFunction(run_bounds_test);
- gc(); // Makes V8 forget about type information for test_func.
-
+ %ClearFunctionTypeFeedback(run_bounds_test);
}
function array_load_set_smi_check(a) {
@@ -370,7 +369,7 @@ for (var t = 0; t < types.length; t++) {
array_load_set_smi_check2(a);
array_load_set_smi_check2(0);
%DeoptimizeFunction(array_load_set_smi_check2);
- gc(); // Makes V8 forget about type information for array_load_set_smi_check.
+ %ClearFunctionTypeFeedback(array_load_set_smi_check2);
}
// Check handling of undefined in 32- and 64-bit external float arrays.
diff --git a/deps/v8/test/mjsunit/external-array.js b/deps/v8/test/mjsunit/external-array.js
index e61ff453ac..bfdab8abff 100644
--- a/deps/v8/test/mjsunit/external-array.js
+++ b/deps/v8/test/mjsunit/external-array.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc
+// Flags: --allow-natives-syntax
// Helper
function assertInstance(o, f) {
@@ -301,7 +301,7 @@ function run_test(test_func, array, expected_result) {
}
assertEquals(expected_result, sum);
%DeoptimizeFunction(test_func);
- gc(); // Makes V8 forget about type information for test_func.
+ %ClearFunctionTypeFeedback(test_func);
}
function run_bounds_test(test_func, array, expected_result) {
@@ -350,8 +350,7 @@ for (var t = 0; t < types.length; t++) {
%OptimizeFunctionOnNextCall(run_bounds_test);
run_bounds_test(a);
%DeoptimizeFunction(run_bounds_test);
- gc(); // Makes V8 forget about type information for test_func.
-
+ %ClearFunctionTypeFeedback(run_bounds_test);
}
function array_load_set_smi_check(a) {
@@ -370,7 +369,7 @@ for (var t = 0; t < types.length; t++) {
array_load_set_smi_check2(a);
array_load_set_smi_check2(0);
%DeoptimizeFunction(array_load_set_smi_check2);
- gc(); // Makes V8 forget about type information for array_load_set_smi_check.
+ %ClearFunctionTypeFeedback(array_load_set_smi_check2);
}
// Check handling of undefined in 32- and 64-bit external float arrays.
diff --git a/deps/v8/test/mjsunit/fast-element-smi-check.js b/deps/v8/test/mjsunit/fast-element-smi-check.js
index d0c45fe629..3083d5fe74 100644
--- a/deps/v8/test/mjsunit/fast-element-smi-check.js
+++ b/deps/v8/test/mjsunit/fast-element-smi-check.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc
+// Flags: --allow-natives-syntax
var a = new Array(10);
@@ -46,7 +46,7 @@ test_load_set_smi_2(a);
test_load_set_smi_2(a);
test_load_set_smi_2(0);
%DeoptimizeFunction(test_load_set_smi_2);
-gc(); // Makes V8 forget about type information for test_load_set_smi.
+%ClearFunctionTypeFeedback(test_load_set_smi_2);
var b = new Object();
@@ -67,4 +67,4 @@ test_load_set_smi_4(b);
test_load_set_smi_4(b);
test_load_set_smi_4(0);
%DeoptimizeFunction(test_load_set_smi_4);
-gc(); // Makes V8 forget about type information for test_load_set_smi.
+%ClearFunctionTypeFeedback(test_load_set_smi_4);
diff --git a/deps/v8/test/mjsunit/function-prototype.js b/deps/v8/test/mjsunit/function-prototype.js
index c5a5487dd0..7eac6df121 100644
--- a/deps/v8/test/mjsunit/function-prototype.js
+++ b/deps/v8/test/mjsunit/function-prototype.js
@@ -90,9 +90,28 @@ assertEquals(F.prototype, GetPrototypeOf(F));
// in GetPrototypeOf and go to a monomorphic IC load instead.
assertEquals(87, GetPrototypeOf({prototype:87}));
-// Check the prototype is not enumerable, for compatibility with
-// safari. This is deliberately incompatible with ECMA262, 15.3.5.2.
+// Check the prototype is not enumerable, as per ES5 section 15.3.5.2. Note
+// that this is in difference to ES3, which specified that function instances
+// would have enumerable prototypes (section 15.3.5.2 also).
var foo = new Function("return x");
var result = ""
for (var n in foo) result += n;
assertEquals(result, "");
+
+f = new Function('return 1;')
+var desc = Object.getOwnPropertyDescriptor(f, "prototype");
+assertFalse(desc.configurable);
+assertFalse(desc.enumerable);
+assertTrue(desc.writable);
+
+f = Function('return 1;')
+var desc = Object.getOwnPropertyDescriptor(f, "prototype");
+assertFalse(desc.configurable);
+assertFalse(desc.enumerable);
+assertTrue(desc.writable);
+
+f = function () { return 1; }
+var desc = Object.getOwnPropertyDescriptor(f, "prototype");
+assertFalse(desc.configurable);
+assertFalse(desc.enumerable);
+assertTrue(desc.writable);
diff --git a/deps/v8/test/mjsunit/generated-transition-stub.js b/deps/v8/test/mjsunit/generated-transition-stub.js
index dd1043b1c6..072ce9ce1c 100644
--- a/deps/v8/test/mjsunit/generated-transition-stub.js
+++ b/deps/v8/test/mjsunit/generated-transition-stub.js
@@ -68,7 +68,7 @@ for (j = 0; j < iteration_count; ++j) {
for (i = 0; i < 0x40000; ++i) {
a5[i] = 0;
}
- assertTrue(%HasFastSmiElements(a5));
+ assertTrue(%HasFastSmiElements(a5) || %HasFastDoubleElements(a5));
transition1(a5, 0, 2.5);
assertEquals(2.5, a5[0]);
}
diff --git a/deps/v8/test/mjsunit/harmony/generators-iteration.js b/deps/v8/test/mjsunit/harmony/generators-iteration.js
index d120ac7b3b..e717f1b4a3 100644
--- a/deps/v8/test/mjsunit/harmony/generators-iteration.js
+++ b/deps/v8/test/mjsunit/harmony/generators-iteration.js
@@ -86,6 +86,10 @@ function TestGenerator(g, expected_values_for_next,
testSend(g);
testThrow(g);
+ testNext(function*() { return yield* g(); });
+ testSend(function*() { return yield* g(); });
+ testThrow(function*() { return yield* g(); });
+
if (g instanceof GeneratorFunction) {
testNext(function() { return new g(); });
testSend(function() { return new g(); });
@@ -320,125 +324,158 @@ TestGenerator(
"foo",
[2, "1foo3", 5, "4foo6", "foofoo"]);
-function TestTryCatch() {
+function TestTryCatch(instantiate) {
function* g() { yield 1; try { yield 2; } catch (e) { yield e; } yield 3; }
function Sentinel() {}
- var iter;
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- var exn = new Sentinel;
- assertIteratorResult(exn, false, iter.throw(exn));
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- var exn = new Sentinel;
- assertIteratorResult(exn, false, iter.throw(exn));
- assertIteratorResult(3, false, iter.next());
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- var exn = new Sentinel;
- assertIteratorResult(exn, false, iter.throw(exn));
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
+
+ function Test1(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test1(instantiate(g));
+
+ function Test2(iter) {
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test2(instantiate(g));
+
+ function Test3(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test3(instantiate(g));
+
+ function Test4(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test4(instantiate(g));
+
+ function Test5(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(3, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ }
+ Test5(instantiate(g));
+
+ function Test6(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test6(instantiate(g));
+
+ function Test7(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test7(instantiate(g));
}
-TestTryCatch();
+TestTryCatch(function (g) { return g(); });
+TestTryCatch(function* (g) { return yield* g(); });
-function TestTryFinally() {
+function TestTryFinally(instantiate) {
function* g() { yield 1; try { yield 2; } finally { yield 3; } yield 4; }
function Sentinel() {}
function Sentinel2() {}
- var iter;
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(4, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.throw(new Sentinel));
- assertThrows(function() { iter.next(); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.throw(new Sentinel));
- assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.next());
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(4, false, iter.next());
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(4, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
+
+ function Test1(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test1(instantiate(g));
+
+ function Test2(iter) {
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test2(instantiate(g));
+
+ function Test3(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test3(instantiate(g));
+
+ function Test4(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.throw(new Sentinel));
+ assertThrows(function() { iter.next(); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ }
+ Test4(instantiate(g));
+
+ function Test5(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.throw(new Sentinel));
+ assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test5(instantiate(g));
+
+ function Test6(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test6(instantiate(g));
+
+ function Test7(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test7(instantiate(g));
+
+ function Test8(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+
+ }
+ Test8(instantiate(g));
}
-TestTryFinally();
+TestTryFinally(function (g) { return g(); });
+TestTryFinally(function* (g) { return yield* g(); });
-function TestNestedTry() {
+function TestNestedTry(instantiate) {
function* g() {
try {
yield 1;
@@ -451,66 +488,82 @@ function TestNestedTry() {
}
function Sentinel() {}
function Sentinel2() {}
- var iter;
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(4, false, iter.next());
- assertIteratorResult(5, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(4, false, iter.throw(new Sentinel));
- assertThrows(function() { iter.next(); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(4, false, iter.throw(new Sentinel));
- assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- var exn = new Sentinel;
- assertIteratorResult(exn, false, iter.throw(exn));
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(4, false, iter.next());
- assertIteratorResult(5, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- var exn = new Sentinel;
- assertIteratorResult(exn, false, iter.throw(exn));
- assertIteratorResult(4, false, iter.throw(new Sentinel2));
- assertThrows(function() { iter.next(); }, Sentinel2);
- assertThrows(function() { iter.next(); }, Error);
-
- iter = g();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- var exn = new Sentinel;
- assertIteratorResult(exn, false, iter.throw(exn));
- assertIteratorResult(3, false, iter.next());
- assertIteratorResult(4, false, iter.throw(new Sentinel2));
- assertThrows(function() { iter.next(); }, Sentinel2);
- assertThrows(function() { iter.next(); }, Error);
+
+ function Test1(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertIteratorResult(5, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test1(instantiate(g));
+
+ function Test2(iter) {
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test2(instantiate(g));
+
+ function Test3(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(4, false, iter.throw(new Sentinel));
+ assertThrows(function() { iter.next(); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test3(instantiate(g));
+
+ function Test4(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(4, false, iter.throw(new Sentinel));
+ assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test4(instantiate(g));
+
+ function Test5(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertIteratorResult(5, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+
+ }
+ Test5(instantiate(g));
+
+ function Test6(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(4, false, iter.throw(new Sentinel2));
+ assertThrows(function() { iter.next(); }, Sentinel2);
+ assertThrows(function() { iter.next(); }, Error);
+ }
+ Test6(instantiate(g));
+
+ function Test7(iter) {
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.throw(new Sentinel2));
+ assertThrows(function() { iter.next(); }, Sentinel2);
+ assertThrows(function() { iter.next(); }, Error);
+
+ }
+ Test7(instantiate(g));
// That's probably enough.
}
-TestNestedTry();
+TestNestedTry(function (g) { return g(); });
+TestNestedTry(function* (g) { return yield* g(); });
function TestRecursion() {
function TestNextRecursion() {
diff --git a/deps/v8/test/mjsunit/harmony/object-observe.js b/deps/v8/test/mjsunit/harmony/object-observe.js
index 263154a406..372ffdbdb7 100644
--- a/deps/v8/test/mjsunit/harmony/object-observe.js
+++ b/deps/v8/test/mjsunit/harmony/object-observe.js
@@ -88,7 +88,11 @@ function createObserver() {
}
var observer = createObserver();
+var observer2 = createObserver();
+
assertEquals("function", typeof observer.callback);
+assertEquals("function", typeof observer2.callback);
+
var obj = {};
function frozenFunction() {}
@@ -109,9 +113,15 @@ Object.defineProperty(changeRecordWithAccessor, 'name', {
assertThrows(function() { Object.observe("non-object", observer.callback); }, TypeError);
assertThrows(function() { Object.observe(obj, nonFunction); }, TypeError);
assertThrows(function() { Object.observe(obj, frozenFunction); }, TypeError);
+assertThrows(function() { Object.observe(obj, function() {}, 1); }, TypeError);
+assertThrows(function() { Object.observe(obj, function() {}, [undefined]); }, TypeError);
+assertThrows(function() { Object.observe(obj, function() {}, [1]); }, TypeError);
+assertThrows(function() { Object.observe(obj, function() {}, ['foo', null]); }, TypeError);
+assertEquals(obj, Object.observe(obj, observer.callback, ['foo', 'bar', 'baz']));
+assertEquals(obj, Object.observe(obj, observer.callback, []));
+assertEquals(obj, Object.observe(obj, observer.callback, undefined));
assertEquals(obj, Object.observe(obj, observer.callback));
-
// Object.unobserve
assertThrows(function() { Object.unobserve(4, observer.callback); }, TypeError);
assertThrows(function() { Object.unobserve(obj, nonFunction); }, TypeError);
@@ -130,6 +140,20 @@ assertTrue(notifyDesc.writable);
assertFalse(notifyDesc.enumerable);
assertThrows(function() { notifier.notify({}); }, TypeError);
assertThrows(function() { notifier.notify({ type: 4 }); }, TypeError);
+
+assertThrows(function() { notifier.performChange(1, function(){}); }, TypeError);
+assertThrows(function() { notifier.performChange(undefined, function(){}); }, TypeError);
+assertThrows(function() { notifier.performChange('foo', undefined); }, TypeError);
+assertThrows(function() { notifier.performChange('foo', 'bar'); }, TypeError);
+var testSelf = {};
+notifier.performChange('foo', function() {
+ assertTrue(testSelf === this);
+}, testSelf);
+var self = this;
+notifier.performChange('foo', function() {
+ assertTrue(self === this);
+});
+
var notify = notifier.notify;
assertThrows(function() { notify.call(undefined, { type: 'a' }); }, TypeError);
assertThrows(function() { notify.call(null, { type: 'a' }); }, TypeError);
@@ -195,7 +219,7 @@ reset();
Object.observe(obj, observer.callback);
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
});
Object.deliverChangeRecords(observer.callback);
observer.assertCalled();
@@ -205,7 +229,7 @@ observer.assertCalled();
reset();
Object.unobserve(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
});
Object.deliverChangeRecords(observer.callback);
observer.assertNotCalled();
@@ -216,7 +240,7 @@ reset();
Object.unobserve(obj, observer.callback);
Object.unobserve(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
});
Object.deliverChangeRecords(observer.callback);
observer.assertNotCalled();
@@ -225,11 +249,11 @@ observer.assertNotCalled();
// Re-observation works and only includes changeRecords after of call.
reset();
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
});
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
});
records = undefined;
Object.deliverChangeRecords(observer.callback);
@@ -240,43 +264,327 @@ observer.assertRecordCount(1);
reset();
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
val: 1
});
Object.unobserve(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
val: 2
});
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
val: 3
});
Object.unobserve(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
val: 4
});
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo',
+ type: 'updated',
val: 5
});
Object.unobserve(obj, observer.callback);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, type: 'foo', val: 1 },
- { object: obj, type: 'foo', val: 3 },
- { object: obj, type: 'foo', val: 5 }
+ { object: obj, type: 'updated', val: 1 },
+ { object: obj, type: 'updated', val: 3 },
+ { object: obj, type: 'updated', val: 5 }
+]);
+
+// Accept
+reset();
+Object.observe(obj, observer.callback, []);
+Object.getNotifier(obj).notify({
+ type: 'new'
+});
+Object.getNotifier(obj).notify({
+ type: 'updated'
+});
+Object.getNotifier(obj).notify({
+ type: 'deleted'
+});
+Object.getNotifier(obj).notify({
+ type: 'reconfigured'
+});
+Object.getNotifier(obj).notify({
+ type: 'prototype'
+});
+Object.deliverChangeRecords(observer.callback);
+observer.assertNotCalled();
+
+reset();
+Object.observe(obj, observer.callback, ['new', 'deleted', 'prototype']);
+Object.getNotifier(obj).notify({
+ type: 'new'
+});
+Object.getNotifier(obj).notify({
+ type: 'updated'
+});
+Object.getNotifier(obj).notify({
+ type: 'deleted'
+});
+Object.getNotifier(obj).notify({
+ type: 'deleted'
+});
+Object.getNotifier(obj).notify({
+ type: 'reconfigured'
+});
+Object.getNotifier(obj).notify({
+ type: 'prototype'
+});
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: obj, type: 'new' },
+ { object: obj, type: 'deleted' },
+ { object: obj, type: 'deleted' },
+ { object: obj, type: 'prototype' }
+]);
+
+reset();
+Object.observe(obj, observer.callback, ['updated', 'foo']);
+Object.getNotifier(obj).notify({
+ type: 'new'
+});
+Object.getNotifier(obj).notify({
+ type: 'updated'
+});
+Object.getNotifier(obj).notify({
+ type: 'deleted'
+});
+Object.getNotifier(obj).notify({
+ type: 'foo'
+});
+Object.getNotifier(obj).notify({
+ type: 'bar'
+});
+Object.getNotifier(obj).notify({
+ type: 'foo'
+});
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: obj, type: 'updated' },
+ { object: obj, type: 'foo' },
+ { object: obj, type: 'foo' }
+]);
+
+reset();
+function Thingy(a, b, c) {
+ this.a = a;
+ this.b = b;
+}
+
+Thingy.MULTIPLY = 'multiply';
+Thingy.INCREMENT = 'increment';
+Thingy.INCREMENT_AND_MULTIPLY = 'incrementAndMultiply';
+
+Thingy.prototype = {
+ increment: function(amount) {
+ var notifier = Object.getNotifier(this);
+
+ notifier.performChange(Thingy.INCREMENT, function() {
+ this.a += amount;
+ this.b += amount;
+ }, this);
+
+ notifier.notify({
+ object: this,
+ type: Thingy.INCREMENT,
+ incremented: amount
+ });
+ },
+
+ multiply: function(amount) {
+ var notifier = Object.getNotifier(this);
+
+ notifier.performChange(Thingy.MULTIPLY, function() {
+ this.a *= amount;
+ this.b *= amount;
+ }, this);
+
+ notifier.notify({
+ object: this,
+ type: Thingy.MULTIPLY,
+ multiplied: amount
+ });
+ },
+
+ incrementAndMultiply: function(incAmount, multAmount) {
+ var notifier = Object.getNotifier(this);
+
+ notifier.performChange(Thingy.INCREMENT_AND_MULTIPLY, function() {
+ this.increment(incAmount);
+ this.multiply(multAmount);
+ }, this);
+
+ notifier.notify({
+ object: this,
+ type: Thingy.INCREMENT_AND_MULTIPLY,
+ incremented: incAmount,
+ multiplied: multAmount
+ });
+ }
+}
+
+Thingy.observe = function(thingy, callback) {
+ Object.observe(thingy, callback, [Thingy.INCREMENT,
+ Thingy.MULTIPLY,
+ Thingy.INCREMENT_AND_MULTIPLY,
+ 'updated']);
+}
+
+Thingy.unobserve = function(thingy, callback) {
+ Object.unobserve(thingy);
+}
+
+var thingy = new Thingy(2, 4);
+
+Object.observe(thingy, observer.callback);
+Thingy.observe(thingy, observer2.callback);
+thingy.increment(3); // { a: 5, b: 7 }
+thingy.b++; // { a: 5, b: 8 }
+thingy.multiply(2); // { a: 10, b: 16 }
+thingy.a++; // { a: 11, b: 16 }
+thingy.incrementAndMultiply(2, 2); // { a: 26, b: 36 }
+
+Object.deliverChangeRecords(observer.callback);
+Object.deliverChangeRecords(observer2.callback);
+observer.assertCallbackRecords([
+ { object: thingy, type: 'updated', name: 'a', oldValue: 2 },
+ { object: thingy, type: 'updated', name: 'b', oldValue: 4 },
+ { object: thingy, type: 'updated', name: 'b', oldValue: 7 },
+ { object: thingy, type: 'updated', name: 'a', oldValue: 5 },
+ { object: thingy, type: 'updated', name: 'b', oldValue: 8 },
+ { object: thingy, type: 'updated', name: 'a', oldValue: 10 },
+ { object: thingy, type: 'updated', name: 'a', oldValue: 11 },
+ { object: thingy, type: 'updated', name: 'b', oldValue: 16 },
+ { object: thingy, type: 'updated', name: 'a', oldValue: 13 },
+ { object: thingy, type: 'updated', name: 'b', oldValue: 18 },
+]);
+observer2.assertCallbackRecords([
+ { object: thingy, type: Thingy.INCREMENT, incremented: 3 },
+ { object: thingy, type: 'updated', name: 'b', oldValue: 7 },
+ { object: thingy, type: Thingy.MULTIPLY, multiplied: 2 },
+ { object: thingy, type: 'updated', name: 'a', oldValue: 10 },
+ {
+ object: thingy,
+ type: Thingy.INCREMENT_AND_MULTIPLY,
+ incremented: 2,
+ multiplied: 2
+ }
]);
+reset();
+function RecursiveThingy() {}
+
+RecursiveThingy.MULTIPLY_FIRST_N = 'multiplyFirstN';
+
+RecursiveThingy.prototype = {
+ __proto__: Array.prototype,
+
+ multiplyFirstN: function(amount, n) {
+ if (!n)
+ return;
+ var notifier = Object.getNotifier(this);
+ notifier.performChange(RecursiveThingy.MULTIPLY_FIRST_N, function() {
+ this[n-1] = this[n-1]*amount;
+ this.multiplyFirstN(amount, n-1);
+ }, this);
+
+ notifier.notify({
+ object: this,
+ type: RecursiveThingy.MULTIPLY_FIRST_N,
+ multiplied: amount,
+ n: n
+ });
+ },
+}
+
+RecursiveThingy.observe = function(thingy, callback) {
+ Object.observe(thingy, callback, [RecursiveThingy.MULTIPLY_FIRST_N]);
+}
+
+RecursiveThingy.unobserve = function(thingy, callback) {
+ Object.unobserve(thingy);
+}
+
+var thingy = new RecursiveThingy;
+thingy.push(1, 2, 3, 4);
+
+Object.observe(thingy, observer.callback);
+RecursiveThingy.observe(thingy, observer2.callback);
+thingy.multiplyFirstN(2, 3); // [2, 4, 6, 4]
+
+Object.deliverChangeRecords(observer.callback);
+Object.deliverChangeRecords(observer2.callback);
+observer.assertCallbackRecords([
+ { object: thingy, type: 'updated', name: '2', oldValue: 3 },
+ { object: thingy, type: 'updated', name: '1', oldValue: 2 },
+ { object: thingy, type: 'updated', name: '0', oldValue: 1 }
+]);
+observer2.assertCallbackRecords([
+ { object: thingy, type: RecursiveThingy.MULTIPLY_FIRST_N, multiplied: 2, n: 3 }
+]);
+
+reset();
+function DeckSuit() {
+ this.push('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'A', 'Q', 'K');
+}
+
+DeckSuit.SHUFFLE = 'shuffle';
+
+DeckSuit.prototype = {
+ __proto__: Array.prototype,
+
+ shuffle: function() {
+ var notifier = Object.getNotifier(this);
+ notifier.performChange(DeckSuit.SHUFFLE, function() {
+ this.reverse();
+ this.sort(function() { return Math.random()* 2 - 1; });
+ var cut = this.splice(0, 6);
+ Array.prototype.push.apply(this, cut);
+ this.reverse();
+ this.sort(function() { return Math.random()* 2 - 1; });
+ var cut = this.splice(0, 6);
+ Array.prototype.push.apply(this, cut);
+ this.reverse();
+ this.sort(function() { return Math.random()* 2 - 1; });
+ }, this);
+
+ notifier.notify({
+ object: this,
+ type: DeckSuit.SHUFFLE
+ });
+ },
+}
+
+DeckSuit.observe = function(thingy, callback) {
+ Object.observe(thingy, callback, [DeckSuit.SHUFFLE]);
+}
+
+DeckSuit.unobserve = function(thingy, callback) {
+ Object.unobserve(thingy);
+}
+
+var deck = new DeckSuit;
+
+DeckSuit.observe(deck, observer2.callback);
+deck.shuffle();
+
+Object.deliverChangeRecords(observer2.callback);
+observer2.assertCallbackRecords([
+ { object: deck, type: DeckSuit.SHUFFLE }
+]);
+
// Observing multiple objects; records appear in order.
reset();
var obj2 = {};
@@ -285,20 +593,20 @@ Object.observe(obj, observer.callback);
Object.observe(obj3, observer.callback);
Object.observe(obj2, observer.callback);
Object.getNotifier(obj).notify({
- type: 'foo1',
+ type: 'new',
});
Object.getNotifier(obj2).notify({
- type: 'foo2',
+ type: 'updated',
});
Object.getNotifier(obj3).notify({
- type: 'foo3',
+ type: 'deleted',
});
Object.observe(obj3, observer.callback);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, type: 'foo1' },
- { object: obj2, type: 'foo2' },
- { object: obj3, type: 'foo3' }
+ { object: obj, type: 'new' },
+ { object: obj2, type: 'updated' },
+ { object: obj3, type: 'deleted' }
]);
@@ -760,13 +1068,22 @@ observer.assertCallbackRecords([
reset();
var array = [1, 2];
Object.observe(array, observer.callback);
+Array.observe(array, observer2.callback);
array.push(3, 4);
+array.push(5);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
{ object: array, name: '2', type: 'new' },
{ object: array, name: 'length', type: 'updated', oldValue: 2 },
{ object: array, name: '3', type: 'new' },
{ object: array, name: 'length', type: 'updated', oldValue: 3 },
+ { object: array, name: '4', type: 'new' },
+ { object: array, name: 'length', type: 'updated', oldValue: 4 },
+]);
+Object.deliverChangeRecords(observer2.callback);
+observer2.assertCallbackRecords([
+ { object: array, type: 'splice', index: 2, removed: [], addedCount: 2 },
+ { object: array, type: 'splice', index: 4, removed: [], addedCount: 1 }
]);
// Pop
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 09097db9f5..585d503a0f 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -40,6 +40,9 @@ regress/regress-524: SKIP
# Skip long running test in debug and allow it to timeout in release mode.
# regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
+# This test non-deterministically runs out of memory on Windows ia32.
+regress/regress-crbug-160010: SKIP
+
# Deferred stack trace formatting is temporarily disabled.
stack-traces-gc: PASS || FAIL
@@ -74,8 +77,7 @@ unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == androi
json-recursive: PASS, (PASS || FAIL) if $mode == debug
##############################################################################
-# Skip long running test that times out in debug mode or goes OOM on android.
-regress/regress-crbug-160010: PASS, SKIP if ($mode == debug || $arch == android_arm)
+# Skip long running tests that time out in debug mode.
generated-transition-stub: PASS, SKIP if $mode == debug
##############################################################################
diff --git a/deps/v8/test/mjsunit/regress/regress-241344.js b/deps/v8/test/mjsunit/regress/regress-241344.js
new file mode 100644
index 0000000000..32a9dd3376
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-241344.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Create a JSON string for an object with indexed properties.
+// Parsing that string creates a sparse array that grows dense.
+
+var jsonstring = '{"0":0.1, "10000":0.4, ';
+for (var i = 1; i < 9999; i++) {
+ jsonstring += '"' + i + '":0.2, ';
+}
+jsonstring += '"9999":0.3}';
+
+var jsonobject = JSON.parse(jsonstring);
+for (var i = 1; i < 9999; i++) {
+ assertEquals(0.2, jsonobject[i]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-2681.js b/deps/v8/test/mjsunit/regress/regress-2681.js
new file mode 100644
index 0000000000..9841d84843
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2681.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --noincremental-marking --harmony-generators
+
+// Check that we are not flushing code for generators.
+
+function flush_all_code() {
+ // Each GC ages code, and currently 6 gcs will flush all code.
+ for (var i = 0; i < 10; i++) gc();
+}
+
+function* g() {
+ yield 1;
+ yield 2;
+}
+
+var o = g();
+assertEquals({ value: 1, done: false }, o.next());
+
+flush_all_code();
+
+assertEquals({ value: 2, done: false }, o.next());
+assertEquals({ value: undefined, done: true }, o.next());
diff --git a/deps/v8/test/mjsunit/regress/regress-2686.js b/deps/v8/test/mjsunit/regress/regress-2686.js
new file mode 100644
index 0000000000..bd6106f9ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2686.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check Function doesn't use String.prototype.indexOf.
+
+assertThrows(function() { Function('){ function foo(', '}') }, SyntaxError);
+String.prototype.indexOf = function () { return -1; }
+assertThrows(function() { Function('){ function foo(', '}') }, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-233737.js b/deps/v8/test/mjsunit/regress/regress-crbug-233737.js
new file mode 100644
index 0000000000..835726b224
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-233737.js
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var a = new Array(2);
+a[0] = 1;
+assertTrue(%HasFastSmiElements(a));
+assertTrue(%HasFastHoleyElements(a));
+
+function hole(i) {
+ return a[i] << 0;
+}
+
+assertEquals(1, hole(0));
+assertEquals(1, hole(0));
+%OptimizeFunctionOnNextCall(hole);
+assertEquals(0, hole(1)); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-242502.js b/deps/v8/test/mjsunit/regress/regress-crbug-242502.js
new file mode 100644
index 0000000000..8ee764029d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-242502.js
@@ -0,0 +1,66 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+function f() {
+ return 23;
+}
+
+function call(o) {
+ return o['']();
+}
+
+function test() {
+ var o1 = %ToFastProperties(Object.create({ foo:1 }, { '': { value:f }}));
+ var o2 = %ToFastProperties(Object.create({ bar:1 }, { '': { value:f }}));
+ var o3 = %ToFastProperties(Object.create({ baz:1 }, { '': { value:f }}));
+ var o4 = %ToFastProperties(Object.create({ qux:1 }, { '': { value:f }}));
+ var o5 = %ToFastProperties(Object.create({ loo:1 }, { '': { value:f }}));
+ // Called twice on o1 to turn monomorphic.
+ assertEquals(23, call(o1));
+ assertEquals(23, call(o1));
+ // Called on four other objects to turn megamorphic.
+ assertEquals(23, call(o2));
+ assertEquals(23, call(o3));
+ assertEquals(23, call(o4));
+ assertEquals(23, call(o5));
+ return o1;
+}
+
+// Fill stub cache with entries.
+test();
+
+// Clear stub cache during GC.
+gc();
+
+// Turn IC megamorphic again.
+var oboom = test();
+
+// Optimize with previously cleared stub cache.
+%OptimizeFunctionOnNextCall(call);
+assertEquals(23, call(oboom));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-242870.js b/deps/v8/test/mjsunit/regress/regress-crbug-242870.js
new file mode 100644
index 0000000000..7183375ca8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-242870.js
@@ -0,0 +1,43 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var non_const_true = true;
+
+function f() {
+ return (non_const_true || true && g());
+}
+
+function g() {
+ for (;;) {}
+}
+
+assertTrue(f());
+assertTrue(f());
+%OptimizeFunctionOnNextCall(f);
+assertTrue(f());
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
new file mode 100644
index 0000000000..be54be6740
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
@@ -0,0 +1,60 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// stubbed version of ToNumber
+function ToNumber(x) {
+ return 311;
+}
+
+// Reduced version of String.fromCharCode;
+// does not actually do the same calculation but exhibits untagging bug.
+function StringFromCharCode(code) {
+ var n = %_ArgumentsLength();
+ var one_byte = %NewString(n, true);
+ var i;
+ for (i = 0; i < n; i++) {
+ var code = %_Arguments(i);
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ if (code > 0xff) break;
+ }
+
+ var two_byte = %NewString(n - i, false);
+ for (var j = 0; i < n; i++, j++) {
+ var code = %_Arguments(i);
+ %_TwoByteSeqStringSetChar(two_byte, j, code);
+ }
+ return one_byte + two_byte;
+}
+
+StringFromCharCode(0xFFF, 0xFFF);
+StringFromCharCode(0x7C, 0x7C);
+%OptimizeFunctionOnNextCall(StringFromCharCode);
+StringFromCharCode(0x7C, 0x7C);
+StringFromCharCode(0xFFF, 0xFFF);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js
new file mode 100644
index 0000000000..6acc2f285e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js
@@ -0,0 +1,35 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+String.fromCharCode(0xFFF, 0xFFF);
+String.fromCharCode(0x7C, 0x7C);
+%OptimizeFunctionOnNextCall(String.fromCharCode);
+String.fromCharCode(0x7C, 0x7C);
+String.fromCharCode(0xFFF, 0xFFF);
+
diff --git a/deps/v8/test/mjsunit/track-fields.js b/deps/v8/test/mjsunit/track-fields.js
index bcf37ae637..ced006c4fb 100644
--- a/deps/v8/test/mjsunit/track-fields.js
+++ b/deps/v8/test/mjsunit/track-fields.js
@@ -261,3 +261,67 @@ assertEquals(some_object20, obj20);
assertEquals(100, o20.smi);
assertEquals(100, o20.dbl);
assertEquals(100, o20.dbl);
+
+function attr_mismatch_obj(v, writable) {
+ var o = {};
+ o.some_value = v;
+ Object.defineProperty(o, "second_value", {value:10, writable:writable});
+ return o;
+}
+
+function is_writable(o, p) {
+ return Object.getOwnPropertyDescriptor(o,p).writable;
+}
+
+var writable = attr_mismatch_obj(10, true);
+var non_writable1 = attr_mismatch_obj(10.5, false);
+assertTrue(is_writable(writable,"second_value"));
+assertFalse(is_writable(non_writable1,"second_value"));
+writable.some_value = 20.5;
+assertTrue(is_writable(writable,"second_value"));
+var non_writable2 = attr_mismatch_obj(10.5, false);
+assertTrue(%HaveSameMap(non_writable1, non_writable2));
+
+function test_f(v) {
+ var o = {};
+ o.vbf = v;
+ o.func = test_f;
+ return o;
+}
+
+function test_fic(o) {
+ return o.vbf;
+}
+
+var ftest1 = test_f(10);
+var ftest2 = test_f(10);
+var ftest3 = test_f(10.5);
+var ftest4 = test_f(10);
+assertFalse(%HaveSameMap(ftest1, ftest3));
+assertTrue(%HaveSameMap(ftest3, ftest4));
+ftest2.func = is_writable;
+test_fic(ftest1);
+test_fic(ftest2);
+test_fic(ftest3);
+test_fic(ftest4);
+assertTrue(%HaveSameMap(ftest1, ftest3));
+assertTrue(%HaveSameMap(ftest3, ftest4));
+
+// Test representations and transition conversions.
+function read_first_double(o) {
+ return o.first_double;
+}
+var df1 = {};
+df1.first_double=1.6;
+read_first_double(df1);
+read_first_double(df1);
+function some_function1() { return 10; }
+var df2 = {};
+df2.first_double = 1.7;
+df2.second_function = some_function1;
+function some_function2() { return 20; }
+var df3 = {};
+df3.first_double = 1.7;
+df3.second_function = some_function2;
+df1.first_double = 10;
+read_first_double(df1);
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index 5d061ae8c4..e773f4b0f7 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -352,6 +352,9 @@ function testOneArrayType(allocator) {
assertTrue(%HasFastDoubleElements(large_array));
}
+// Force gc here to start with a clean heap if we repeat this test multiple
+// times.
+gc();
testOneArrayType(make_object_like_array);
testOneArrayType(Array);
diff --git a/deps/v8/test/test262/README b/deps/v8/test/test262/README
index 1ddbc709be..680ab77d7e 100644
--- a/deps/v8/test/test262/README
+++ b/deps/v8/test/test262/README
@@ -4,11 +4,11 @@ tests from
http://hg.ecmascript.org/tests/test262
-at revision 360 as 'data' in this directory. Using later version
+at revision 365 as 'data' in this directory. Using later version
may be possible but the tests are only known to pass (and indeed run)
with that revision.
-hg clone -r 360 http://hg.ecmascript.org/tests/test262 data
+hg clone -r 365 http://hg.ecmascript.org/tests/test262 data
If you do update to a newer revision you may have to change the test
harness adapter code since it uses internal functionality from the
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index c07c30270c..fc03504dca 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -36,8 +36,8 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-TEST_262_ARCHIVE_REVISION = "53c4ade82d14" # This is the r360 revision.
-TEST_262_ARCHIVE_MD5 = "5fa4918b00e5d60e57bdd3c05deaeb0c"
+TEST_262_ARCHIVE_REVISION = "99aac3bc1cad" # This is the r365 revision.
+TEST_262_ARCHIVE_MD5 = "aadbd720ce9bdb4f8f3de066f4d7eea1"
TEST_262_URL = "http://hg.ecmascript.org/tests/test262/archive/%s.tar.bz2"
TEST_262_HARNESS = ["sta.js", "testBuiltInObject.js"]
TEST_262_SKIP = ["intl402"]
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index fc6296a616..f31fc4a9cb 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -27,978 +27,901 @@
{
'includes': ['../../build/common.gypi'],
- 'conditions': [
- ['use_system_v8==0', {
- 'targets': [
+ 'targets': [
+ {
+ 'target_name': 'v8',
+ 'dependencies_traverse': 1,
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['v8_use_snapshot=="true"', {
+ # The dependency on v8_base should come from a transitive
+ # dependency however the Android toolchain requires libv8_base.a
+ # to appear before libv8_snapshot.a so it's listed explicitly.
+ 'dependencies': ['v8_base.<(v8_target_arch)', 'v8_snapshot'],
+ },
{
- 'target_name': 'v8',
- 'dependencies_traverse': 1,
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_use_snapshot=="true"', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base.<(v8_target_arch)', 'v8_snapshot'],
- },
- {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': [
- 'v8_base.<(v8_target_arch)',
- 'v8_nosnapshot.<(v8_target_arch)',
- ],
- }],
- ['component=="shared_library"', {
- 'type': '<(component)',
- 'sources': [
- # Note: on non-Windows we still build this file so that gyp
- # has some sources to link into the component.
- '../../src/v8dll-main.cc',
- ],
- 'defines': [
- 'V8_SHARED',
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'V8_SHARED',
- 'USING_V8_SHARED',
- ],
- },
- 'target_conditions': [
- ['OS=="android" and _toolset=="target"', {
- 'libraries': [
- '-llog',
- ],
- 'include_dirs': [
- 'src/common/android/include',
- ],
- }],
- ],
- 'conditions': [
- ['OS=="mac"', {
- 'xcode_settings': {
- 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
- },
- }],
- ['soname_version!=""', {
- 'product_extension': 'so.<(soname_version)',
- }],
- ],
- },
- {
- 'type': 'none',
- }],
+ # The dependency on v8_base should come from a transitive
+ # dependency however the Android toolchain requires libv8_base.a
+ # to appear before libv8_snapshot.a so it's listed explicitly.
+ 'dependencies': [
+ 'v8_base.<(v8_target_arch)',
+ 'v8_nosnapshot.<(v8_target_arch)',
+ ],
+ }],
+ ['component=="shared_library"', {
+ 'type': '<(component)',
+ 'sources': [
+ # Note: on non-Windows we still build this file so that gyp
+ # has some sources to link into the component.
+ '../../src/v8dll-main.cc',
+ ],
+ 'defines': [
+ 'V8_SHARED',
+ 'BUILDING_V8_SHARED',
],
'direct_dependent_settings': {
- 'include_dirs': [
- '../../include',
+ 'defines': [
+ 'V8_SHARED',
+ 'USING_V8_SHARED',
],
},
- },
- {
- 'target_name': 'v8_snapshot',
- 'type': 'static_library',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': [
- 'mksnapshot.<(v8_target_arch)#host',
- 'js2c#host',
+ 'target_conditions': [
+ ['OS=="android" and _toolset=="target"', {
+ 'libraries': [
+ '-llog',
],
- }, {
- 'toolsets': ['target'],
- 'dependencies': ['mksnapshot.<(v8_target_arch)', 'js2c'],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'V8_SHARED',
- 'BUILDING_V8_SHARED',
+ 'include_dirs': [
+ 'src/common/android/include',
],
- 'direct_dependent_settings': {
- 'defines': [
- 'V8_SHARED',
- 'USING_V8_SHARED',
- ],
+ }],
+ ],
+ 'conditions': [
+ ['OS=="mac"', {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
},
}],
+ ['soname_version!=""', {
+ 'product_extension': 'so.<(soname_version)',
+ }],
],
+ },
+ {
+ 'type': 'none',
+ }],
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../../include',
+ ],
+ },
+ },
+ {
+ 'target_name': 'v8_snapshot',
+ 'type': 'static_library',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
'dependencies': [
- 'v8_base.<(v8_target_arch)',
+ 'mksnapshot.<(v8_target_arch)#host',
+ 'js2c#host',
],
- 'include_dirs+': [
- '../../src',
+ }, {
+ 'toolsets': ['target'],
+ 'dependencies': ['mksnapshot.<(v8_target_arch)', 'js2c'],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'V8_SHARED',
+ 'BUILDING_V8_SHARED',
],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'V8_SHARED',
+ 'USING_V8_SHARED',
+ ],
+ },
+ }],
+ ],
+ 'dependencies': [
+ 'v8_base.<(v8_target_arch)',
+ ],
+ 'include_dirs+': [
+ '../../src',
+ ],
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '<(INTERMEDIATE_DIR)/snapshot.cc',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'run_mksnapshot',
+ 'inputs': [
+ '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot.<(v8_target_arch)<(EXECUTABLE_SUFFIX)',
+ ],
+ 'outputs': [
'<(INTERMEDIATE_DIR)/snapshot.cc',
],
- 'actions': [
- {
- 'action_name': 'run_mksnapshot',
- 'inputs': [
- '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot.<(v8_target_arch)<(EXECUTABLE_SUFFIX)',
- ],
- 'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- ],
- 'variables': {
- 'mksnapshot_flags': [
- '--log-snapshot-positions',
- '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
- ],
- },
- 'action': [
- '<@(_inputs)',
- '<@(mksnapshot_flags)',
- '<@(_outputs)'
- ],
- },
+ 'variables': {
+ 'mksnapshot_flags': [
+ '--log-snapshot-positions',
+ '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
+ ],
+ },
+ 'action': [
+ '<@(_inputs)',
+ '<@(mksnapshot_flags)',
+ '<@(_outputs)'
],
},
- {
- 'target_name': 'v8_nosnapshot.<(v8_target_arch)',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_base.<(v8_target_arch)',
+ ],
+ },
+ {
+ 'target_name': 'v8_nosnapshot.<(v8_target_arch)',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_base.<(v8_target_arch)',
+ ],
+ 'include_dirs+': [
+ '../../src',
+ ],
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '../../src/snapshot-empty.cc',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ 'dependencies': ['js2c#host'],
+ }, {
+ 'toolsets': ['target'],
+ 'dependencies': ['js2c'],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ 'V8_SHARED',
],
- 'include_dirs+': [
- '../../src',
+ }],
+ ]
+ },
+ {
+ 'target_name': 'v8_base.<(v8_target_arch)',
+ 'type': 'static_library',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '../../src',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ '../../src/accessors.cc',
+ '../../src/accessors.h',
+ '../../src/allocation.cc',
+ '../../src/allocation.h',
+ '../../src/api.cc',
+ '../../src/api.h',
+ '../../src/apiutils.h',
+ '../../src/arguments.cc',
+ '../../src/arguments.h',
+ '../../src/assembler.cc',
+ '../../src/assembler.h',
+ '../../src/ast.cc',
+ '../../src/ast.h',
+ '../../src/atomicops.h',
+ '../../src/atomicops_internals_x86_gcc.cc',
+ '../../src/bignum-dtoa.cc',
+ '../../src/bignum-dtoa.h',
+ '../../src/bignum.cc',
+ '../../src/bignum.h',
+ '../../src/bootstrapper.cc',
+ '../../src/bootstrapper.h',
+ '../../src/builtins.cc',
+ '../../src/builtins.h',
+ '../../src/bytecodes-irregexp.h',
+ '../../src/cached-powers.cc',
+ '../../src/cached-powers.h',
+ '../../src/char-predicates-inl.h',
+ '../../src/char-predicates.h',
+ '../../src/checks.cc',
+ '../../src/checks.h',
+ '../../src/circular-queue-inl.h',
+ '../../src/circular-queue.cc',
+ '../../src/circular-queue.h',
+ '../../src/code-stubs.cc',
+ '../../src/code-stubs.h',
+ '../../src/code-stubs-hydrogen.cc',
+ '../../src/code.h',
+ '../../src/codegen.cc',
+ '../../src/codegen.h',
+ '../../src/compilation-cache.cc',
+ '../../src/compilation-cache.h',
+ '../../src/compiler.cc',
+ '../../src/compiler.h',
+ '../../src/contexts.cc',
+ '../../src/contexts.h',
+ '../../src/conversions-inl.h',
+ '../../src/conversions.cc',
+ '../../src/conversions.h',
+ '../../src/counters.cc',
+ '../../src/counters.h',
+ '../../src/cpu-profiler-inl.h',
+ '../../src/cpu-profiler.cc',
+ '../../src/cpu-profiler.h',
+ '../../src/cpu.h',
+ '../../src/data-flow.cc',
+ '../../src/data-flow.h',
+ '../../src/date.cc',
+ '../../src/date.h',
+ '../../src/dateparser-inl.h',
+ '../../src/dateparser.cc',
+ '../../src/dateparser.h',
+ '../../src/debug-agent.cc',
+ '../../src/debug-agent.h',
+ '../../src/debug.cc',
+ '../../src/debug.h',
+ '../../src/deoptimizer.cc',
+ '../../src/deoptimizer.h',
+ '../../src/disasm.h',
+ '../../src/disassembler.cc',
+ '../../src/disassembler.h',
+ '../../src/diy-fp.cc',
+ '../../src/diy-fp.h',
+ '../../src/double.h',
+ '../../src/dtoa.cc',
+ '../../src/dtoa.h',
+ '../../src/elements-kind.cc',
+ '../../src/elements-kind.h',
+ '../../src/elements.cc',
+ '../../src/elements.h',
+ '../../src/execution.cc',
+ '../../src/execution.h',
+ '../../src/extensions/externalize-string-extension.cc',
+ '../../src/extensions/externalize-string-extension.h',
+ '../../src/extensions/gc-extension.cc',
+ '../../src/extensions/gc-extension.h',
+ '../../src/extensions/statistics-extension.cc',
+ '../../src/extensions/statistics-extension.h',
+ '../../src/factory.cc',
+ '../../src/factory.h',
+ '../../src/fast-dtoa.cc',
+ '../../src/fast-dtoa.h',
+ '../../src/fixed-dtoa.cc',
+ '../../src/fixed-dtoa.h',
+ '../../src/flag-definitions.h',
+ '../../src/flags.cc',
+ '../../src/flags.h',
+ '../../src/frames-inl.h',
+ '../../src/frames.cc',
+ '../../src/frames.h',
+ '../../src/full-codegen.cc',
+ '../../src/full-codegen.h',
+ '../../src/func-name-inferrer.cc',
+ '../../src/func-name-inferrer.h',
+ '../../src/gdb-jit.cc',
+ '../../src/gdb-jit.h',
+ '../../src/global-handles.cc',
+ '../../src/global-handles.h',
+ '../../src/globals.h',
+ '../../src/handles-inl.h',
+ '../../src/handles.cc',
+ '../../src/handles.h',
+ '../../src/hashmap.h',
+ '../../src/heap-inl.h',
+ '../../src/heap-profiler.cc',
+ '../../src/heap-profiler.h',
+ '../../src/heap-snapshot-generator-inl.h',
+ '../../src/heap-snapshot-generator.cc',
+ '../../src/heap-snapshot-generator.h',
+ '../../src/heap.cc',
+ '../../src/heap.h',
+ '../../src/hydrogen-instructions.cc',
+ '../../src/hydrogen-instructions.h',
+ '../../src/hydrogen.cc',
+ '../../src/hydrogen.h',
+ '../../src/ic-inl.h',
+ '../../src/ic.cc',
+ '../../src/ic.h',
+ '../../src/incremental-marking.cc',
+ '../../src/incremental-marking.h',
+ '../../src/interface.cc',
+ '../../src/interface.h',
+ '../../src/interpreter-irregexp.cc',
+ '../../src/interpreter-irregexp.h',
+ '../../src/isolate.cc',
+ '../../src/isolate.h',
+ '../../src/json-parser.h',
+ '../../src/json-stringifier.h',
+ '../../src/jsregexp-inl.h',
+ '../../src/jsregexp.cc',
+ '../../src/jsregexp.h',
+ '../../src/lazy-instance.h',
+ '../../src/list-inl.h',
+ '../../src/list.h',
+ '../../src/lithium-allocator-inl.h',
+ '../../src/lithium-allocator.cc',
+ '../../src/lithium-allocator.h',
+ '../../src/lithium.cc',
+ '../../src/lithium.h',
+ '../../src/liveedit.cc',
+ '../../src/liveedit.h',
+ '../../src/log-inl.h',
+ '../../src/log-utils.cc',
+ '../../src/log-utils.h',
+ '../../src/log.cc',
+ '../../src/log.h',
+ '../../src/macro-assembler.h',
+ '../../src/mark-compact.cc',
+ '../../src/mark-compact.h',
+ '../../src/marking-thread.h',
+ '../../src/marking-thread.cc',
+ '../../src/messages.cc',
+ '../../src/messages.h',
+ '../../src/natives.h',
+ '../../src/objects-debug.cc',
+ '../../src/objects-inl.h',
+ '../../src/objects-printer.cc',
+ '../../src/objects-visiting.cc',
+ '../../src/objects-visiting.h',
+ '../../src/objects.cc',
+ '../../src/objects.h',
+ '../../src/once.cc',
+ '../../src/once.h',
+ '../../src/optimizing-compiler-thread.h',
+ '../../src/optimizing-compiler-thread.cc',
+ '../../src/parser.cc',
+ '../../src/parser.h',
+ '../../src/platform-posix.h',
+ '../../src/platform-tls-mac.h',
+ '../../src/platform-tls-win32.h',
+ '../../src/platform-tls.h',
+ '../../src/platform.h',
+ '../../src/preparse-data-format.h',
+ '../../src/preparse-data.cc',
+ '../../src/preparse-data.h',
+ '../../src/preparser.cc',
+ '../../src/preparser.h',
+ '../../src/prettyprinter.cc',
+ '../../src/prettyprinter.h',
+ '../../src/profile-generator-inl.h',
+ '../../src/profile-generator.cc',
+ '../../src/profile-generator.h',
+ '../../src/property-details.h',
+ '../../src/property.cc',
+ '../../src/property.h',
+ '../../src/regexp-macro-assembler-irregexp-inl.h',
+ '../../src/regexp-macro-assembler-irregexp.cc',
+ '../../src/regexp-macro-assembler-irregexp.h',
+ '../../src/regexp-macro-assembler-tracer.cc',
+ '../../src/regexp-macro-assembler-tracer.h',
+ '../../src/regexp-macro-assembler.cc',
+ '../../src/regexp-macro-assembler.h',
+ '../../src/regexp-stack.cc',
+ '../../src/regexp-stack.h',
+ '../../src/rewriter.cc',
+ '../../src/rewriter.h',
+ '../../src/runtime-profiler.cc',
+ '../../src/runtime-profiler.h',
+ '../../src/runtime.cc',
+ '../../src/runtime.h',
+ '../../src/safepoint-table.cc',
+ '../../src/safepoint-table.h',
+ '../../src/sampler.cc',
+ '../../src/sampler.h',
+ '../../src/scanner-character-streams.cc',
+ '../../src/scanner-character-streams.h',
+ '../../src/scanner.cc',
+ '../../src/scanner.h',
+ '../../src/scopeinfo.cc',
+ '../../src/scopeinfo.h',
+ '../../src/scopes.cc',
+ '../../src/scopes.h',
+ '../../src/serialize.cc',
+ '../../src/serialize.h',
+ '../../src/small-pointer-list.h',
+ '../../src/smart-pointers.h',
+ '../../src/snapshot-common.cc',
+ '../../src/snapshot.h',
+ '../../src/spaces-inl.h',
+ '../../src/spaces.cc',
+ '../../src/spaces.h',
+ '../../src/store-buffer-inl.h',
+ '../../src/store-buffer.cc',
+ '../../src/store-buffer.h',
+ '../../src/string-search.cc',
+ '../../src/string-search.h',
+ '../../src/string-stream.cc',
+ '../../src/string-stream.h',
+ '../../src/strtod.cc',
+ '../../src/strtod.h',
+ '../../src/stub-cache.cc',
+ '../../src/stub-cache.h',
+ '../../src/sweeper-thread.h',
+ '../../src/sweeper-thread.cc',
+ '../../src/token.cc',
+ '../../src/token.h',
+ '../../src/transitions-inl.h',
+ '../../src/transitions.cc',
+ '../../src/transitions.h',
+ '../../src/type-info.cc',
+ '../../src/type-info.h',
+ '../../src/unbound-queue-inl.h',
+ '../../src/unbound-queue.h',
+ '../../src/unicode-inl.h',
+ '../../src/unicode.cc',
+ '../../src/unicode.h',
+ '../../src/uri.h',
+ '../../src/utils-inl.h',
+ '../../src/utils.cc',
+ '../../src/utils.h',
+ '../../src/v8-counters.cc',
+ '../../src/v8-counters.h',
+ '../../src/v8.cc',
+ '../../src/v8.h',
+ '../../src/v8checks.h',
+ '../../src/v8conversions.cc',
+ '../../src/v8conversions.h',
+ '../../src/v8globals.h',
+ '../../src/v8memory.h',
+ '../../src/v8threads.cc',
+ '../../src/v8threads.h',
+ '../../src/v8utils.cc',
+ '../../src/v8utils.h',
+ '../../src/variables.cc',
+ '../../src/variables.h',
+ '../../src/version.cc',
+ '../../src/version.h',
+ '../../src/vm-state-inl.h',
+ '../../src/vm-state.h',
+ '../../src/zone-inl.h',
+ '../../src/zone.cc',
+ '../../src/zone.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['v8_target_arch=="arm"', {
+ 'sources': [ ### gcmole(arch:arm) ###
+ '../../src/arm/assembler-arm-inl.h',
+ '../../src/arm/assembler-arm.cc',
+ '../../src/arm/assembler-arm.h',
+ '../../src/arm/builtins-arm.cc',
+ '../../src/arm/code-stubs-arm.cc',
+ '../../src/arm/code-stubs-arm.h',
+ '../../src/arm/codegen-arm.cc',
+ '../../src/arm/codegen-arm.h',
+ '../../src/arm/constants-arm.h',
+ '../../src/arm/constants-arm.cc',
+ '../../src/arm/cpu-arm.cc',
+ '../../src/arm/debug-arm.cc',
+ '../../src/arm/deoptimizer-arm.cc',
+ '../../src/arm/disasm-arm.cc',
+ '../../src/arm/frames-arm.cc',
+ '../../src/arm/frames-arm.h',
+ '../../src/arm/full-codegen-arm.cc',
+ '../../src/arm/ic-arm.cc',
+ '../../src/arm/lithium-arm.cc',
+ '../../src/arm/lithium-arm.h',
+ '../../src/arm/lithium-codegen-arm.cc',
+ '../../src/arm/lithium-codegen-arm.h',
+ '../../src/arm/lithium-gap-resolver-arm.cc',
+ '../../src/arm/lithium-gap-resolver-arm.h',
+ '../../src/arm/macro-assembler-arm.cc',
+ '../../src/arm/macro-assembler-arm.h',
+ '../../src/arm/regexp-macro-assembler-arm.cc',
+ '../../src/arm/regexp-macro-assembler-arm.h',
+ '../../src/arm/simulator-arm.cc',
+ '../../src/arm/stub-cache-arm.cc',
],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- '../../src/snapshot-empty.cc',
+ }],
+ ['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
+ 'sources': [ ### gcmole(arch:ia32) ###
+ '../../src/ia32/assembler-ia32-inl.h',
+ '../../src/ia32/assembler-ia32.cc',
+ '../../src/ia32/assembler-ia32.h',
+ '../../src/ia32/builtins-ia32.cc',
+ '../../src/ia32/code-stubs-ia32.cc',
+ '../../src/ia32/code-stubs-ia32.h',
+ '../../src/ia32/codegen-ia32.cc',
+ '../../src/ia32/codegen-ia32.h',
+ '../../src/ia32/cpu-ia32.cc',
+ '../../src/ia32/debug-ia32.cc',
+ '../../src/ia32/deoptimizer-ia32.cc',
+ '../../src/ia32/disasm-ia32.cc',
+ '../../src/ia32/frames-ia32.cc',
+ '../../src/ia32/frames-ia32.h',
+ '../../src/ia32/full-codegen-ia32.cc',
+ '../../src/ia32/ic-ia32.cc',
+ '../../src/ia32/lithium-codegen-ia32.cc',
+ '../../src/ia32/lithium-codegen-ia32.h',
+ '../../src/ia32/lithium-gap-resolver-ia32.cc',
+ '../../src/ia32/lithium-gap-resolver-ia32.h',
+ '../../src/ia32/lithium-ia32.cc',
+ '../../src/ia32/lithium-ia32.h',
+ '../../src/ia32/macro-assembler-ia32.cc',
+ '../../src/ia32/macro-assembler-ia32.h',
+ '../../src/ia32/regexp-macro-assembler-ia32.cc',
+ '../../src/ia32/regexp-macro-assembler-ia32.h',
+ '../../src/ia32/stub-cache-ia32.cc',
],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': ['js2c#host'],
- }, {
- 'toolsets': ['target'],
- 'dependencies': ['js2c'],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- 'V8_SHARED',
- ],
- }],
- ]
- },
- {
- 'target_name': 'v8_base.<(v8_target_arch)',
- 'type': 'static_library',
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '../../src',
+ }],
+ ['v8_target_arch=="mipsel"', {
+ 'sources': [ ### gcmole(arch:mipsel) ###
+ '../../src/mips/assembler-mips.cc',
+ '../../src/mips/assembler-mips.h',
+ '../../src/mips/assembler-mips-inl.h',
+ '../../src/mips/builtins-mips.cc',
+ '../../src/mips/codegen-mips.cc',
+ '../../src/mips/codegen-mips.h',
+ '../../src/mips/code-stubs-mips.cc',
+ '../../src/mips/code-stubs-mips.h',
+ '../../src/mips/constants-mips.cc',
+ '../../src/mips/constants-mips.h',
+ '../../src/mips/cpu-mips.cc',
+ '../../src/mips/debug-mips.cc',
+ '../../src/mips/deoptimizer-mips.cc',
+ '../../src/mips/disasm-mips.cc',
+ '../../src/mips/frames-mips.cc',
+ '../../src/mips/frames-mips.h',
+ '../../src/mips/full-codegen-mips.cc',
+ '../../src/mips/ic-mips.cc',
+ '../../src/mips/lithium-codegen-mips.cc',
+ '../../src/mips/lithium-codegen-mips.h',
+ '../../src/mips/lithium-gap-resolver-mips.cc',
+ '../../src/mips/lithium-gap-resolver-mips.h',
+ '../../src/mips/lithium-mips.cc',
+ '../../src/mips/lithium-mips.h',
+ '../../src/mips/macro-assembler-mips.cc',
+ '../../src/mips/macro-assembler-mips.h',
+ '../../src/mips/regexp-macro-assembler-mips.cc',
+ '../../src/mips/regexp-macro-assembler-mips.h',
+ '../../src/mips/simulator-mips.cc',
+ '../../src/mips/stub-cache-mips.cc',
],
- 'sources': [ ### gcmole(all) ###
- '../../src/accessors.cc',
- '../../src/accessors.h',
- '../../src/allocation.cc',
- '../../src/allocation.h',
- '../../src/api.cc',
- '../../src/api.h',
- '../../src/apiutils.h',
- '../../src/arguments.h',
- '../../src/assembler.cc',
- '../../src/assembler.h',
- '../../src/ast.cc',
- '../../src/ast.h',
- '../../src/atomicops.h',
- '../../src/atomicops_internals_x86_gcc.cc',
- '../../src/bignum-dtoa.cc',
- '../../src/bignum-dtoa.h',
- '../../src/bignum.cc',
- '../../src/bignum.h',
- '../../src/bootstrapper.cc',
- '../../src/bootstrapper.h',
- '../../src/builtins.cc',
- '../../src/builtins.h',
- '../../src/bytecodes-irregexp.h',
- '../../src/cached-powers.cc',
- '../../src/cached-powers.h',
- '../../src/char-predicates-inl.h',
- '../../src/char-predicates.h',
- '../../src/checks.cc',
- '../../src/checks.h',
- '../../src/circular-queue-inl.h',
- '../../src/circular-queue.cc',
- '../../src/circular-queue.h',
- '../../src/code-stubs.cc',
- '../../src/code-stubs.h',
- '../../src/code-stubs-hydrogen.cc',
- '../../src/code.h',
- '../../src/codegen.cc',
- '../../src/codegen.h',
- '../../src/compilation-cache.cc',
- '../../src/compilation-cache.h',
- '../../src/compiler.cc',
- '../../src/compiler.h',
- '../../src/contexts.cc',
- '../../src/contexts.h',
- '../../src/conversions-inl.h',
- '../../src/conversions.cc',
- '../../src/conversions.h',
- '../../src/counters.cc',
- '../../src/counters.h',
- '../../src/cpu-profiler-inl.h',
- '../../src/cpu-profiler.cc',
- '../../src/cpu-profiler.h',
- '../../src/cpu.h',
- '../../src/data-flow.cc',
- '../../src/data-flow.h',
- '../../src/date.cc',
- '../../src/date.h',
- '../../src/dateparser-inl.h',
- '../../src/dateparser.cc',
- '../../src/dateparser.h',
- '../../src/debug-agent.cc',
- '../../src/debug-agent.h',
- '../../src/debug.cc',
- '../../src/debug.h',
- '../../src/deoptimizer.cc',
- '../../src/deoptimizer.h',
- '../../src/disasm.h',
- '../../src/disassembler.cc',
- '../../src/disassembler.h',
- '../../src/diy-fp.cc',
- '../../src/diy-fp.h',
- '../../src/double.h',
- '../../src/dtoa.cc',
- '../../src/dtoa.h',
- '../../src/elements-kind.cc',
- '../../src/elements-kind.h',
- '../../src/elements.cc',
- '../../src/elements.h',
- '../../src/execution.cc',
- '../../src/execution.h',
- '../../src/extensions/externalize-string-extension.cc',
- '../../src/extensions/externalize-string-extension.h',
- '../../src/extensions/gc-extension.cc',
- '../../src/extensions/gc-extension.h',
- '../../src/extensions/statistics-extension.cc',
- '../../src/extensions/statistics-extension.h',
- '../../src/factory.cc',
- '../../src/factory.h',
- '../../src/fast-dtoa.cc',
- '../../src/fast-dtoa.h',
- '../../src/fixed-dtoa.cc',
- '../../src/fixed-dtoa.h',
- '../../src/flag-definitions.h',
- '../../src/flags.cc',
- '../../src/flags.h',
- '../../src/frames-inl.h',
- '../../src/frames.cc',
- '../../src/frames.h',
- '../../src/full-codegen.cc',
- '../../src/full-codegen.h',
- '../../src/func-name-inferrer.cc',
- '../../src/func-name-inferrer.h',
- '../../src/gdb-jit.cc',
- '../../src/gdb-jit.h',
- '../../src/global-handles.cc',
- '../../src/global-handles.h',
- '../../src/globals.h',
- '../../src/handles-inl.h',
- '../../src/handles.cc',
- '../../src/handles.h',
- '../../src/hashmap.h',
- '../../src/heap-inl.h',
- '../../src/heap-profiler.cc',
- '../../src/heap-profiler.h',
- '../../src/heap-snapshot-generator-inl.h',
- '../../src/heap-snapshot-generator.cc',
- '../../src/heap-snapshot-generator.h',
- '../../src/heap.cc',
- '../../src/heap.h',
- '../../src/hydrogen-instructions.cc',
- '../../src/hydrogen-instructions.h',
- '../../src/hydrogen.cc',
- '../../src/hydrogen.h',
- '../../src/ic-inl.h',
- '../../src/ic.cc',
- '../../src/ic.h',
- '../../src/incremental-marking.cc',
- '../../src/incremental-marking.h',
- '../../src/interface.cc',
- '../../src/interface.h',
- '../../src/interpreter-irregexp.cc',
- '../../src/interpreter-irregexp.h',
- '../../src/isolate.cc',
- '../../src/isolate.h',
- '../../src/json-parser.h',
- '../../src/json-stringifier.h',
- '../../src/jsregexp-inl.h',
- '../../src/jsregexp.cc',
- '../../src/jsregexp.h',
- '../../src/lazy-instance.h',
- '../../src/list-inl.h',
- '../../src/list.h',
- '../../src/lithium-allocator-inl.h',
- '../../src/lithium-allocator.cc',
- '../../src/lithium-allocator.h',
- '../../src/lithium.cc',
- '../../src/lithium.h',
- '../../src/liveedit.cc',
- '../../src/liveedit.h',
- '../../src/log-inl.h',
- '../../src/log-utils.cc',
- '../../src/log-utils.h',
- '../../src/log.cc',
- '../../src/log.h',
- '../../src/macro-assembler.h',
- '../../src/mark-compact.cc',
- '../../src/mark-compact.h',
- '../../src/marking-thread.h',
- '../../src/marking-thread.cc',
- '../../src/messages.cc',
- '../../src/messages.h',
- '../../src/natives.h',
- '../../src/objects-debug.cc',
- '../../src/objects-inl.h',
- '../../src/objects-printer.cc',
- '../../src/objects-visiting.cc',
- '../../src/objects-visiting.h',
- '../../src/objects.cc',
- '../../src/objects.h',
- '../../src/once.cc',
- '../../src/once.h',
- '../../src/optimizing-compiler-thread.h',
- '../../src/optimizing-compiler-thread.cc',
- '../../src/parser.cc',
- '../../src/parser.h',
- '../../src/platform-posix.h',
- '../../src/platform-tls-mac.h',
- '../../src/platform-tls-win32.h',
- '../../src/platform-tls.h',
- '../../src/platform.h',
- '../../src/preparse-data-format.h',
- '../../src/preparse-data.cc',
- '../../src/preparse-data.h',
- '../../src/preparser.cc',
- '../../src/preparser.h',
- '../../src/prettyprinter.cc',
- '../../src/prettyprinter.h',
- '../../src/profile-generator-inl.h',
- '../../src/profile-generator.cc',
- '../../src/profile-generator.h',
- '../../src/property-details.h',
- '../../src/property.cc',
- '../../src/property.h',
- '../../src/regexp-macro-assembler-irregexp-inl.h',
- '../../src/regexp-macro-assembler-irregexp.cc',
- '../../src/regexp-macro-assembler-irregexp.h',
- '../../src/regexp-macro-assembler-tracer.cc',
- '../../src/regexp-macro-assembler-tracer.h',
- '../../src/regexp-macro-assembler.cc',
- '../../src/regexp-macro-assembler.h',
- '../../src/regexp-stack.cc',
- '../../src/regexp-stack.h',
- '../../src/rewriter.cc',
- '../../src/rewriter.h',
- '../../src/runtime-profiler.cc',
- '../../src/runtime-profiler.h',
- '../../src/runtime.cc',
- '../../src/runtime.h',
- '../../src/safepoint-table.cc',
- '../../src/safepoint-table.h',
- '../../src/sampler.cc',
- '../../src/sampler.h',
- '../../src/scanner-character-streams.cc',
- '../../src/scanner-character-streams.h',
- '../../src/scanner.cc',
- '../../src/scanner.h',
- '../../src/scopeinfo.cc',
- '../../src/scopeinfo.h',
- '../../src/scopes.cc',
- '../../src/scopes.h',
- '../../src/serialize.cc',
- '../../src/serialize.h',
- '../../src/small-pointer-list.h',
- '../../src/smart-pointers.h',
- '../../src/snapshot-common.cc',
- '../../src/snapshot.h',
- '../../src/spaces-inl.h',
- '../../src/spaces.cc',
- '../../src/spaces.h',
- '../../src/store-buffer-inl.h',
- '../../src/store-buffer.cc',
- '../../src/store-buffer.h',
- '../../src/string-search.cc',
- '../../src/string-search.h',
- '../../src/string-stream.cc',
- '../../src/string-stream.h',
- '../../src/strtod.cc',
- '../../src/strtod.h',
- '../../src/stub-cache.cc',
- '../../src/stub-cache.h',
- '../../src/sweeper-thread.h',
- '../../src/sweeper-thread.cc',
- '../../src/token.cc',
- '../../src/token.h',
- '../../src/transitions-inl.h',
- '../../src/transitions.cc',
- '../../src/transitions.h',
- '../../src/type-info.cc',
- '../../src/type-info.h',
- '../../src/unbound-queue-inl.h',
- '../../src/unbound-queue.h',
- '../../src/unicode-inl.h',
- '../../src/unicode.cc',
- '../../src/unicode.h',
- '../../src/uri.h',
- '../../src/utils-inl.h',
- '../../src/utils.cc',
- '../../src/utils.h',
- '../../src/v8-counters.cc',
- '../../src/v8-counters.h',
- '../../src/v8.cc',
- '../../src/v8.h',
- '../../src/v8checks.h',
- '../../src/v8conversions.cc',
- '../../src/v8conversions.h',
- '../../src/v8globals.h',
- '../../src/v8memory.h',
- '../../src/v8threads.cc',
- '../../src/v8threads.h',
- '../../src/v8utils.cc',
- '../../src/v8utils.h',
- '../../src/variables.cc',
- '../../src/variables.h',
- '../../src/version.cc',
- '../../src/version.h',
- '../../src/vm-state-inl.h',
- '../../src/vm-state.h',
- '../../src/zone-inl.h',
- '../../src/zone.cc',
- '../../src/zone.h',
+ }],
+ ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', {
+ 'sources': [ ### gcmole(arch:x64) ###
+ '../../src/x64/assembler-x64-inl.h',
+ '../../src/x64/assembler-x64.cc',
+ '../../src/x64/assembler-x64.h',
+ '../../src/x64/builtins-x64.cc',
+ '../../src/x64/code-stubs-x64.cc',
+ '../../src/x64/code-stubs-x64.h',
+ '../../src/x64/codegen-x64.cc',
+ '../../src/x64/codegen-x64.h',
+ '../../src/x64/cpu-x64.cc',
+ '../../src/x64/debug-x64.cc',
+ '../../src/x64/deoptimizer-x64.cc',
+ '../../src/x64/disasm-x64.cc',
+ '../../src/x64/frames-x64.cc',
+ '../../src/x64/frames-x64.h',
+ '../../src/x64/full-codegen-x64.cc',
+ '../../src/x64/ic-x64.cc',
+ '../../src/x64/lithium-codegen-x64.cc',
+ '../../src/x64/lithium-codegen-x64.h',
+ '../../src/x64/lithium-gap-resolver-x64.cc',
+ '../../src/x64/lithium-gap-resolver-x64.h',
+ '../../src/x64/lithium-x64.cc',
+ '../../src/x64/lithium-x64.h',
+ '../../src/x64/macro-assembler-x64.cc',
+ '../../src/x64/macro-assembler-x64.h',
+ '../../src/x64/regexp-macro-assembler-x64.cc',
+ '../../src/x64/regexp-macro-assembler-x64.h',
+ '../../src/x64/stub-cache-x64.cc',
],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- '../../src/arm/assembler-arm-inl.h',
- '../../src/arm/assembler-arm.cc',
- '../../src/arm/assembler-arm.h',
- '../../src/arm/builtins-arm.cc',
- '../../src/arm/code-stubs-arm.cc',
- '../../src/arm/code-stubs-arm.h',
- '../../src/arm/codegen-arm.cc',
- '../../src/arm/codegen-arm.h',
- '../../src/arm/constants-arm.h',
- '../../src/arm/constants-arm.cc',
- '../../src/arm/cpu-arm.cc',
- '../../src/arm/debug-arm.cc',
- '../../src/arm/deoptimizer-arm.cc',
- '../../src/arm/disasm-arm.cc',
- '../../src/arm/frames-arm.cc',
- '../../src/arm/frames-arm.h',
- '../../src/arm/full-codegen-arm.cc',
- '../../src/arm/ic-arm.cc',
- '../../src/arm/lithium-arm.cc',
- '../../src/arm/lithium-arm.h',
- '../../src/arm/lithium-codegen-arm.cc',
- '../../src/arm/lithium-codegen-arm.h',
- '../../src/arm/lithium-gap-resolver-arm.cc',
- '../../src/arm/lithium-gap-resolver-arm.h',
- '../../src/arm/macro-assembler-arm.cc',
- '../../src/arm/macro-assembler-arm.h',
- '../../src/arm/regexp-macro-assembler-arm.cc',
- '../../src/arm/regexp-macro-assembler-arm.h',
- '../../src/arm/simulator-arm.cc',
- '../../src/arm/stub-cache-arm.cc',
- ],
- }],
- ['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- '../../src/ia32/assembler-ia32-inl.h',
- '../../src/ia32/assembler-ia32.cc',
- '../../src/ia32/assembler-ia32.h',
- '../../src/ia32/builtins-ia32.cc',
- '../../src/ia32/code-stubs-ia32.cc',
- '../../src/ia32/code-stubs-ia32.h',
- '../../src/ia32/codegen-ia32.cc',
- '../../src/ia32/codegen-ia32.h',
- '../../src/ia32/cpu-ia32.cc',
- '../../src/ia32/debug-ia32.cc',
- '../../src/ia32/deoptimizer-ia32.cc',
- '../../src/ia32/disasm-ia32.cc',
- '../../src/ia32/frames-ia32.cc',
- '../../src/ia32/frames-ia32.h',
- '../../src/ia32/full-codegen-ia32.cc',
- '../../src/ia32/ic-ia32.cc',
- '../../src/ia32/lithium-codegen-ia32.cc',
- '../../src/ia32/lithium-codegen-ia32.h',
- '../../src/ia32/lithium-gap-resolver-ia32.cc',
- '../../src/ia32/lithium-gap-resolver-ia32.h',
- '../../src/ia32/lithium-ia32.cc',
- '../../src/ia32/lithium-ia32.h',
- '../../src/ia32/macro-assembler-ia32.cc',
- '../../src/ia32/macro-assembler-ia32.h',
- '../../src/ia32/regexp-macro-assembler-ia32.cc',
- '../../src/ia32/regexp-macro-assembler-ia32.h',
- '../../src/ia32/stub-cache-ia32.cc',
- ],
- }],
- ['v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- '../../src/mips/assembler-mips.cc',
- '../../src/mips/assembler-mips.h',
- '../../src/mips/assembler-mips-inl.h',
- '../../src/mips/builtins-mips.cc',
- '../../src/mips/codegen-mips.cc',
- '../../src/mips/codegen-mips.h',
- '../../src/mips/code-stubs-mips.cc',
- '../../src/mips/code-stubs-mips.h',
- '../../src/mips/constants-mips.cc',
- '../../src/mips/constants-mips.h',
- '../../src/mips/cpu-mips.cc',
- '../../src/mips/debug-mips.cc',
- '../../src/mips/deoptimizer-mips.cc',
- '../../src/mips/disasm-mips.cc',
- '../../src/mips/frames-mips.cc',
- '../../src/mips/frames-mips.h',
- '../../src/mips/full-codegen-mips.cc',
- '../../src/mips/ic-mips.cc',
- '../../src/mips/lithium-codegen-mips.cc',
- '../../src/mips/lithium-codegen-mips.h',
- '../../src/mips/lithium-gap-resolver-mips.cc',
- '../../src/mips/lithium-gap-resolver-mips.h',
- '../../src/mips/lithium-mips.cc',
- '../../src/mips/lithium-mips.h',
- '../../src/mips/macro-assembler-mips.cc',
- '../../src/mips/macro-assembler-mips.h',
- '../../src/mips/regexp-macro-assembler-mips.cc',
- '../../src/mips/regexp-macro-assembler-mips.h',
- '../../src/mips/simulator-mips.cc',
- '../../src/mips/stub-cache-mips.cc',
- ],
- }],
- ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', {
- 'sources': [ ### gcmole(arch:x64) ###
- '../../src/x64/assembler-x64-inl.h',
- '../../src/x64/assembler-x64.cc',
- '../../src/x64/assembler-x64.h',
- '../../src/x64/builtins-x64.cc',
- '../../src/x64/code-stubs-x64.cc',
- '../../src/x64/code-stubs-x64.h',
- '../../src/x64/codegen-x64.cc',
- '../../src/x64/codegen-x64.h',
- '../../src/x64/cpu-x64.cc',
- '../../src/x64/debug-x64.cc',
- '../../src/x64/deoptimizer-x64.cc',
- '../../src/x64/disasm-x64.cc',
- '../../src/x64/frames-x64.cc',
- '../../src/x64/frames-x64.h',
- '../../src/x64/full-codegen-x64.cc',
- '../../src/x64/ic-x64.cc',
- '../../src/x64/lithium-codegen-x64.cc',
- '../../src/x64/lithium-codegen-x64.h',
- '../../src/x64/lithium-gap-resolver-x64.cc',
- '../../src/x64/lithium-gap-resolver-x64.h',
- '../../src/x64/lithium-x64.cc',
- '../../src/x64/lithium-x64.h',
- '../../src/x64/macro-assembler-x64.cc',
- '../../src/x64/macro-assembler-x64.h',
- '../../src/x64/regexp-macro-assembler-x64.cc',
- '../../src/x64/regexp-macro-assembler-x64.h',
- '../../src/x64/stub-cache-x64.cc',
+ }],
+ ['OS=="linux"', {
+ 'link_settings': {
+ 'conditions': [
+ ['v8_compress_startup_data=="bz2"', {
+ 'libraries': [
+ '-lbz2',
+ ]
+ }],
],
- }],
- ['OS=="linux"', {
- 'link_settings': {
- 'conditions': [
- ['v8_compress_startup_data=="bz2"', {
- 'libraries': [
- '-lbz2',
- ]
- }],
- ],
- },
- 'sources': [ ### gcmole(os:linux) ###
- '../../src/platform-linux.cc',
- '../../src/platform-posix.cc'
- ],
- }
+ },
+ 'sources': [ ### gcmole(os:linux) ###
+ '../../src/platform-linux.cc',
+ '../../src/platform-posix.cc'
],
- ['OS=="android"', {
- 'defines': [
- 'CAN_USE_VFP_INSTRUCTIONS',
- ],
- 'sources': [
- '../../src/platform-posix.cc',
- ],
- 'conditions': [
- ['host_os=="mac"', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'sources': [
- '../../src/platform-macos.cc'
- ]
- }, {
- 'sources': [
- '../../src/platform-linux.cc'
- ]
- }],
- ],
+ }
+ ],
+ ['OS=="android"', {
+ 'defines': [
+ 'CAN_USE_VFP_INSTRUCTIONS',
+ ],
+ 'sources': [
+ '../../src/platform-posix.cc',
+ ],
+ 'conditions': [
+ ['host_os=="mac"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'sources': [
+ '../../src/platform-macos.cc'
+ ]
}, {
'sources': [
'../../src/platform-linux.cc'
]
}],
],
- },
- ],
- ['OS=="freebsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
+ }, {
'sources': [
- '../../src/platform-freebsd.cc',
- '../../src/platform-posix.cc'
- ],
- }
+ '../../src/platform-linux.cc'
+ ]
+ }],
],
- ['OS=="openbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- '../../src/platform-openbsd.cc',
- '../../src/platform-posix.cc'
- ],
- }
+ },
+ ],
+ ['OS=="freebsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ]},
+ 'sources': [
+ '../../src/platform-freebsd.cc',
+ '../../src/platform-posix.cc'
],
- ['OS=="netbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
- ]},
- 'sources': [
- '../../src/platform-openbsd.cc',
- '../../src/platform-posix.cc'
- ],
- }
+ }
+ ],
+ ['OS=="openbsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ]},
+ 'sources': [
+ '../../src/platform-openbsd.cc',
+ '../../src/platform-posix.cc'
],
- ['OS=="solaris"', {
- 'link_settings': {
- 'libraries': [
- '-lsocket -lnsl',
- ]},
- 'sources': [
- '../../src/platform-solaris.cc',
- '../../src/platform-posix.cc',
- ],
- }
+ }
+ ],
+ ['OS=="netbsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
+ ]},
+ 'sources': [
+ '../../src/platform-openbsd.cc',
+ '../../src/platform-posix.cc'
],
- ['OS=="mac"', {
- 'sources': [
- '../../src/platform-macos.cc',
- '../../src/platform-posix.cc'
- ]},
+ }
+ ],
+ ['OS=="solaris"', {
+ 'link_settings': {
+ 'libraries': [
+ '-lsocket -lnsl',
+ ]},
+ 'sources': [
+ '../../src/platform-solaris.cc',
+ '../../src/platform-posix.cc',
],
- ['OS=="win"', {
+ }
+ ],
+ ['OS=="mac"', {
+ 'sources': [
+ '../../src/platform-macos.cc',
+ '../../src/platform-posix.cc'
+ ]},
+ ],
+ ['OS=="win"', {
+ 'variables': {
+ 'gyp_generators': '<!(echo $GYP_GENERATORS)',
+ },
+ 'conditions': [
+ ['gyp_generators=="make"', {
'variables': {
- 'gyp_generators': '<!(echo $GYP_GENERATORS)',
+ 'build_env': '<!(uname -o)',
},
'conditions': [
- ['gyp_generators=="make"', {
- 'variables': {
- 'build_env': '<!(uname -o)',
- },
- 'conditions': [
- ['build_env=="Cygwin"', {
- 'sources': [
- '../../src/platform-cygwin.cc',
- '../../src/platform-posix.cc',
- ],
- }, {
- 'sources': [
- '../../src/platform-win32.cc',
- '../../src/win32-math.h',
- '../../src/win32-math.cc',
- ],
- }],
+ ['build_env=="Cygwin"', {
+ 'sources': [
+ '../../src/platform-cygwin.cc',
+ '../../src/platform-posix.cc',
],
- 'link_settings': {
- 'libraries': [ '-lwinmm', '-lws2_32' ],
- },
}, {
'sources': [
'../../src/platform-win32.cc',
'../../src/win32-math.h',
'../../src/win32-math.cc',
],
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- 'link_settings': {
- 'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
- },
}],
],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- 'V8_SHARED',
- ],
- }],
- ['v8_postmortem_support=="true"', {
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ]
- }],
- ],
- },
- {
- 'target_name': 'js2c',
- 'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
+ 'link_settings': {
+ 'libraries': [ '-lwinmm', '-lws2_32' ],
+ },
}, {
- 'toolsets': ['target'],
+ 'sources': [
+ '../../src/platform-win32.cc',
+ '../../src/win32-math.h',
+ '../../src/win32-math.cc',
+ ],
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
+ 'link_settings': {
+ 'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
+ },
}],
],
- 'variables': {
- 'library_files': [
- '../../src/runtime.js',
- '../../src/v8natives.js',
- '../../src/array.js',
- '../../src/string.js',
- '../../src/uri.js',
- '../../src/math.js',
- '../../src/messages.js',
- '../../src/apinatives.js',
- '../../src/debug-debugger.js',
- '../../src/mirror-debugger.js',
- '../../src/liveedit-debugger.js',
- '../../src/date.js',
- '../../src/json.js',
- '../../src/regexp.js',
- '../../src/macros.py',
- ],
- 'experimental_library_files': [
- '../../src/macros.py',
- '../../src/symbol.js',
- '../../src/proxy.js',
- '../../src/collection.js',
- '../../src/object-observe.js',
- '../../src/arraybuffer.js',
- '../../src/typedarray.js',
- '../../src/generator.js'
- ],
- },
- 'actions': [
- {
- 'action_name': 'js2c',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(library_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- ],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<@(_outputs)',
- 'CORE',
- '<(v8_compress_startup_data)',
- '<@(library_files)'
- ],
- },
- {
- 'action_name': 'js2c_experimental',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(experimental_library_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- ],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<@(_outputs)',
- 'EXPERIMENTAL',
- '<(v8_compress_startup_data)',
- '<@(experimental_library_files)'
- ],
- },
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ 'V8_SHARED',
],
- },
- {
- 'target_name': 'postmortem-metadata',
- 'type': 'none',
- 'variables': {
- 'heapobject_files': [
- '../../src/objects.h',
- '../../src/objects-inl.h',
- ],
- },
- 'actions': [
- {
- 'action_name': 'gen-postmortem-metadata',
- 'inputs': [
- '../../tools/gen-postmortem-metadata.py',
- '<@(heapobject_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ],
- 'action': [
- 'python',
- '../../tools/gen-postmortem-metadata.py',
- '<@(_outputs)',
- '<@(heapobject_files)'
- ]
- }
- ]
- },
+ }],
+ ['v8_postmortem_support=="true"', {
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+ ]
+ }],
+ ],
+ },
+ {
+ 'target_name': 'js2c',
+ 'type': 'none',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ 'variables': {
+ 'library_files': [
+ '../../src/runtime.js',
+ '../../src/v8natives.js',
+ '../../src/array.js',
+ '../../src/string.js',
+ '../../src/uri.js',
+ '../../src/math.js',
+ '../../src/messages.js',
+ '../../src/apinatives.js',
+ '../../src/debug-debugger.js',
+ '../../src/mirror-debugger.js',
+ '../../src/liveedit-debugger.js',
+ '../../src/date.js',
+ '../../src/json.js',
+ '../../src/regexp.js',
+ '../../src/macros.py',
+ ],
+ 'experimental_library_files': [
+ '../../src/macros.py',
+ '../../src/symbol.js',
+ '../../src/proxy.js',
+ '../../src/collection.js',
+ '../../src/object-observe.js',
+ '../../src/arraybuffer.js',
+ '../../src/typedarray.js',
+ '../../src/generator.js'
+ ],
+ },
+ 'actions': [
{
- 'target_name': 'mksnapshot.<(v8_target_arch)',
- 'type': 'executable',
- 'dependencies': [
- 'v8_base.<(v8_target_arch)',
- 'v8_nosnapshot.<(v8_target_arch)',
+ 'action_name': 'js2c',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(library_files)',
],
- 'include_dirs+': [
- '../../src',
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
],
- 'sources': [
- '../../src/mksnapshot.cc',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_compress_startup_data=="bz2"', {
- 'libraries': [
- '-lbz2',
- ]
- }],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<@(_outputs)',
+ 'CORE',
+ '<(v8_compress_startup_data)',
+ '<@(library_files)'
],
},
{
- 'target_name': 'v8_shell',
- 'type': 'executable',
- 'dependencies': [
- 'v8'
+ 'action_name': 'js2c_experimental',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(experimental_library_files)',
],
- 'sources': [
- '../../samples/shell.cc',
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ['OS=="win"', {
- # This could be gotten by not setting chromium_code, if that's OK.
- 'defines': ['_CRT_SECURE_NO_WARNINGS'],
- }],
- ['v8_compress_startup_data=="bz2"', {
- 'libraries': [
- '-lbz2',
- ]
- }],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<@(_outputs)',
+ 'EXPERIMENTAL',
+ '<(v8_compress_startup_data)',
+ '<@(experimental_library_files)'
],
},
],
- }, { # use_system_v8 != 0
- 'targets': [
- {
- 'target_name': 'v8',
- 'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- 'variables': {
- 'shim_headers_path': '<(SHARED_INTERMEDIATE_DIR)/shim_headers/<(_target_name)/<(_toolset)',
- },
- 'include_dirs++': [
- '<(shim_headers_path)',
- ],
- 'all_dependent_settings': {
- 'include_dirs+++': [
- '<(shim_headers_path)',
+ },
+ {
+ 'target_name': 'postmortem-metadata',
+ 'type': 'none',
+ 'variables': {
+ 'heapobject_files': [
+ '../../src/objects.h',
+ '../../src/objects-inl.h',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'gen-postmortem-metadata',
+ 'inputs': [
+ '../../tools/gen-postmortem-metadata.py',
+ '<@(heapobject_files)',
],
- },
- 'actions': [
- {
- 'variables': {
- 'generator_path': '../../../tools/generate_shim_headers/generate_shim_headers.py',
- 'generator_args': [
- '--headers-root', '../../include',
- '--output-directory', '<(shim_headers_path)',
- 'v8-debug.h',
- 'v8-preparser.h',
- 'v8-profiler.h',
- 'v8-testing.h',
- 'v8.h',
- 'v8stdint.h',
- ],
- },
- 'action_name': 'generate_<(_target_name)_shim_headers',
- 'inputs': [
- '<(generator_path)',
- ],
- 'outputs': [
- '<!@pymod_do_main(generate_shim_headers <@(generator_args) --outputs)',
- ],
- 'action': ['python',
- '<(generator_path)',
- '<@(generator_args)',
- '--generate',
- ],
- 'message': 'Generating <(_target_name) shim headers.',
- },
- ],
- 'link_settings': {
- 'libraries': [
- '-lv8',
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
],
- },
- },
- {
- 'target_name': 'v8_shell',
- 'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- 'dependencies': [
- 'v8'
- ],
- },
+ 'action': [
+ 'python',
+ '../../tools/gen-postmortem-metadata.py',
+ '<@(_outputs)',
+ '<@(heapobject_files)'
+ ]
+ }
+ ]
+ },
+ {
+ 'target_name': 'mksnapshot.<(v8_target_arch)',
+ 'type': 'executable',
+ 'dependencies': [
+ 'v8_base.<(v8_target_arch)',
+ 'v8_nosnapshot.<(v8_target_arch)',
+ ],
+ 'include_dirs+': [
+ '../../src',
+ ],
+ 'sources': [
+ '../../src/mksnapshot.cc',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['v8_compress_startup_data=="bz2"', {
+ 'libraries': [
+ '-lbz2',
+ ]
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_shell',
+ 'type': 'executable',
+ 'dependencies': [
+ 'v8'
+ ],
+ 'sources': [
+ '../../samples/shell.cc',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['OS=="win"', {
+ # This could be gotten by not setting chromium_code, if that's OK.
+ 'defines': ['_CRT_SECURE_NO_WARNINGS'],
+ }],
+ ['v8_compress_startup_data=="bz2"', {
+ 'libraries': [
+ '-lbz2',
+ ]
+ }],
],
- }],
+ },
],
}