-rw-r--r--  deps/v8/ChangeLog | 107
-rw-r--r--  deps/v8/DEPS | 27
-rw-r--r--  deps/v8/Makefile | 7
-rw-r--r--  deps/v8/SConstruct | 17
-rw-r--r--  deps/v8/build/common.gypi | 287
-rwxr-xr-x  deps/v8/build/gyp_v8 | 36
-rw-r--r--  deps/v8/build/standalone.gypi | 12
-rw-r--r--  deps/v8/include/v8.h | 8
-rwxr-xr-x  deps/v8/src/SConscript | 1
-rw-r--r--  deps/v8/src/api.cc | 37
-rw-r--r--  deps/v8/src/api.h | 4
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 9
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 35
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 7
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 61
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 5
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 9
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 154
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 88
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 9
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 170
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h | 13
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 12
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 50
-rw-r--r--  deps/v8/src/bootstrapper.cc | 20
-rw-r--r--  deps/v8/src/builtins.cc | 87
-rw-r--r--  deps/v8/src/code-stubs.cc | 32
-rw-r--r--  deps/v8/src/code-stubs.h | 1
-rw-r--r--  deps/v8/src/codegen.h | 6
-rw-r--r--  deps/v8/src/contexts.h | 20
-rw-r--r--  deps/v8/src/d8.cc | 61
-rw-r--r--  deps/v8/src/d8.h | 2
-rw-r--r--  deps/v8/src/debug-agent.cc | 32
-rw-r--r--  deps/v8/src/debug.cc | 42
-rw-r--r--  deps/v8/src/debug.h | 50
-rw-r--r--  deps/v8/src/elements-kind.cc | 134
-rw-r--r--  deps/v8/src/elements-kind.h | 210
-rw-r--r--  deps/v8/src/elements.cc | 546
-rw-r--r--  deps/v8/src/elements.h | 64
-rw-r--r--  deps/v8/src/factory.cc | 5
-rw-r--r--  deps/v8/src/factory.h | 17
-rw-r--r--  deps/v8/src/flag-definitions.h | 3
-rw-r--r--  deps/v8/src/frames.h | 3
-rw-r--r--  deps/v8/src/func-name-inferrer.h | 2
-rw-r--r--  deps/v8/src/globals.h | 3
-rw-r--r--  deps/v8/src/heap-inl.h | 20
-rw-r--r--  deps/v8/src/heap.cc | 70
-rw-r--r--  deps/v8/src/heap.h | 20
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 33
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 182
-rw-r--r--  deps/v8/src/hydrogen.cc | 365
-rw-r--r--  deps/v8/src/hydrogen.h | 3
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 3
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 9
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 38
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 31
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 16
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 38
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 189
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 3
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 8
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 12
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 90
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 9
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 156
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 13
-rw-r--r--  deps/v8/src/ia32/simulator-ia32.h | 8
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 27
-rw-r--r--  deps/v8/src/ic.cc | 97
-rw-r--r--  deps/v8/src/ic.h | 20
-rw-r--r--  deps/v8/src/incremental-marking-inl.h | 26
-rw-r--r--  deps/v8/src/incremental-marking.cc | 43
-rw-r--r--  deps/v8/src/incremental-marking.h | 15
-rw-r--r--  deps/v8/src/isolate.h | 2
-rw-r--r--  deps/v8/src/jsregexp.cc | 88
-rw-r--r--  deps/v8/src/jsregexp.h | 106
-rw-r--r--  deps/v8/src/list-inl.h | 8
-rw-r--r--  deps/v8/src/list.h | 3
-rw-r--r--  deps/v8/src/lithium.cc | 7
-rw-r--r--  deps/v8/src/liveedit.cc | 62
-rw-r--r--  deps/v8/src/mark-compact-inl.h | 28
-rw-r--r--  deps/v8/src/mark-compact.cc | 234
-rw-r--r--  deps/v8/src/mark-compact.h | 64
-rw-r--r--  deps/v8/src/messages.js | 230
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 9
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 38
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 8
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 68
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 158
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 5
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 10
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 89
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 9
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc | 160
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.h | 11
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 10
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 66
-rw-r--r--  deps/v8/src/objects-debug.cc | 109
-rw-r--r--  deps/v8/src/objects-inl.h | 325
-rw-r--r--  deps/v8/src/objects-printer.cc | 5
-rw-r--r--  deps/v8/src/objects.cc | 815
-rw-r--r--  deps/v8/src/objects.h | 208
-rw-r--r--  deps/v8/src/parser.cc | 20
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 1
-rw-r--r--  deps/v8/src/platform-posix.cc | 35
-rw-r--r--  deps/v8/src/platform-win32.cc | 18
-rw-r--r--  deps/v8/src/platform.h | 3
-rw-r--r--  deps/v8/src/profile-generator-inl.h | 27
-rw-r--r--  deps/v8/src/profile-generator.cc | 1122
-rw-r--r--  deps/v8/src/profile-generator.h | 311
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.cc | 3
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.h | 4
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc | 11
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.h | 2
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 10
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h | 12
-rw-r--r--  deps/v8/src/regexp.js | 6
-rw-r--r--  deps/v8/src/runtime.cc | 447
-rw-r--r--  deps/v8/src/runtime.h | 7
-rw-r--r--  deps/v8/src/spaces.cc | 2
-rw-r--r--  deps/v8/src/string-stream.cc | 4
-rw-r--r--  deps/v8/src/v8-counters.h | 2
-rw-r--r--  deps/v8/src/v8utils.h | 9
-rw-r--r--  deps/v8/src/version.cc | 6
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 3
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 9
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 41
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 15
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 20
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 36
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 218
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 3
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 5
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 9
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 90
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 9
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 173
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h | 25
-rw-r--r--  deps/v8/src/x64/simulator-x64.h | 8
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 41
-rw-r--r--  deps/v8/test/cctest/cctest.status | 1
-rw-r--r--  deps/v8/test/cctest/test-func-name-inference.cc | 38
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 98
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 65
-rw-r--r--  deps/v8/test/cctest/test-list.cc | 12
-rw-r--r--  deps/v8/test/cctest/test-mark-compact.cc | 10
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 22
-rw-r--r--  deps/v8/test/cctest/testcfg.py | 2
-rw-r--r--  deps/v8/test/mjsunit/accessor-map-sharing.js | 176
-rw-r--r--  deps/v8/test/mjsunit/array-construct-transition.js | 6
-rw-r--r--  deps/v8/test/mjsunit/array-literal-transitions.js | 20
-rw-r--r--  deps/v8/test/mjsunit/big-array-literal.js | 3
-rw-r--r--  deps/v8/test/mjsunit/compiler/inline-construct.js | 6
-rw-r--r--  deps/v8/test/mjsunit/debug-liveedit-stack-padding.js | 88
-rw-r--r--  deps/v8/test/mjsunit/elements-kind.js | 8
-rw-r--r--  deps/v8/test/mjsunit/elements-transition-hoisting.js | 4
-rw-r--r--  deps/v8/test/mjsunit/elements-transition.js | 10
-rw-r--r--  deps/v8/test/mjsunit/error-constructors.js | 101
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 1
-rw-r--r--  deps/v8/test/mjsunit/packed-elements.js | 112
-rw-r--r--  deps/v8/test/mjsunit/regexp-capture-3.js | 3
-rwxr-xr-x  deps/v8/test/mjsunit/regexp-capture.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regexp-global.js | 132
-rw-r--r--  deps/v8/test/mjsunit/regexp.js | 11
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-117409.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-128146.js | 33
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1639-2.js | 5
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1639.js | 22
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1849.js | 6
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1878.js | 4
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2153.js | 32
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-122271.js | 8
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-smi-only-concat.js | 4
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-transcendental.js | 49
-rw-r--r--  deps/v8/test/mjsunit/stack-traces.js | 14
-rw-r--r--  deps/v8/test/mjsunit/unbox-double-arrays.js | 7
-rw-r--r--  deps/v8/test/test262/testcfg.py | 7
-rw-r--r--  deps/v8/tools/check-static-initializers.sh | 14
-rw-r--r--  deps/v8/tools/fuzz-harness.sh | 92
-rwxr-xr-x  deps/v8/tools/grokdump.py | 281
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 60
-rw-r--r--  deps/v8/tools/js2c.py | 6
-rw-r--r--  deps/v8/tools/jsmin.py | 4
-rwxr-xr-x  deps/v8/tools/presubmit.py | 3
-rwxr-xr-x  deps/v8/tools/test-wrapper-gypbuild.py | 36
192 files changed, 4208 insertions, 7189 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index f64809dd69..09c02378f5 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,110 +1,3 @@
-2012-05-29: Version 3.11.7
-
- Get better function names in stack traces.
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-24: Version 3.11.6
-
- Fixed RegExp.prototype.toString for incompatible receivers
- (issue 1981).
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-23: Version 3.11.5
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-22: Version 3.11.4
-
- Some cleanup to common.gypi. This fixes some host/target combinations
- that weren't working in the Make build on Mac.
-
- Handle EINTR in socket functions and continue incomplete sends.
- (issue 2098)
-
- Fixed python deprecations. (issue 1391)
-
- Made socket send and receive more robust and return 0 on failure.
- (Chromium issue 15719)
-
- Fixed GCC 4.7 (C++11) compilation. (issue 2136)
-
- Set '-m32' option for host and target platforms
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-18: Version 3.11.3
-
- Disable optimization for functions that have scopes that cannot be
- reconstructed from the context chain. (issue 2071)
-
- Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
-
- Correctly check for native error objects. (Chromium issue 2138)
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-16: Version 3.11.2
-
- Revert r11496. (Chromium issue 128146)
-
- Implement map collection for incremental marking. (issue 1465)
-
- Add toString method to CallSite (which describes a frame of the
- stack trace).
-
-
-2012-05-15: Version 3.11.1
-
- Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
-
- Fix freebsd build. (V8 issue 2126)
-
- Performance and stability improvements on all platforms.
-
-
-2012-05-11: Version 3.11.0
-
- Fixed compose-discard crasher from r11524 (issue 2123).
-
- Activated new global semantics by default. Global variables can
- now shadow properties of the global object (ES5.1 erratum).
-
- Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
- transitioning (Chromium issue 117409).
-
- Made Error.prototype.name writable again, as required by the spec and
- the web (Chromium issue 69187).
-
- Implemented map collection with incremental marking (issue 1465).
-
- Regexp: Fixed overflow in min-match-length calculation
- (Chromium issue 126412).
-
- MIPS: Fixed illegal instruction use on Loongson in code for
- Math.random() (issue 2115).
-
- Fixed crash bug in VisitChoice (Chromium issue 126272).
-
- Fixed unsigned-Smi check in MappedArgumentsLookup
- (Chromium issue 126414).
-
- Fixed LiveEdit for function with no locals (issue 825).
-
- Fixed register clobbering in LoadIC for interceptors
- (Chromium issue 125988).
-
- Implemented clearing of CompareICs (issue 2102).
-
- Performance and stability improvements on all platforms.
-
-
2012-05-03: Version 3.10.8
Enabled MIPS cross-compilation.
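[Annotation: the ChangeLog hunk above deletes the 3.11.0 through 3.11.7 entries, leaving 3.10.8 as the head of the log again; this commit rolls the bundled V8 back to that release. One of the reverted 3.11.4 fixes, "Handle EINTR in socket functions and continue incomplete sends" (issue 2098), follows the standard POSIX retry idiom. A minimal sketch of that idiom, not V8's actual platform-posix.cc code:

    // Sketch only: retry send() on EINTR and continue partial sends,
    // the pattern the reverted issue-2098 fix applied.
    #include <cerrno>
    #include <sys/socket.h>

    ssize_t SendAll(int fd, const char* data, size_t len) {
      size_t written = 0;
      while (written < len) {
        ssize_t n = send(fd, data + written, len - written, 0);
        if (n >= 0) {
          written += static_cast<size_t>(n);  // partial send: keep going
        } else if (errno != EINTR) {
          return -1;                          // real error; EINTR just retries
        }
      }
      return static_cast<ssize_t>(written);
    }
]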
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
deleted file mode 100644
index e50d1d20f6..0000000000
--- a/deps/v8/DEPS
+++ /dev/null
@@ -1,27 +0,0 @@
-# Note: The buildbots evaluate this file with CWD set to the parent
-# directory and assume that the root of the checkout is in ./v8/, so
-# all paths in here must match this assumption.
-
-deps = {
- # Remember to keep the revision in sync with the Makefile.
- "v8/build/gyp":
- "http://gyp.googlecode.com/svn/trunk@1282",
-}
-
-deps_os = {
- "win": {
- "v8/third_party/cygwin":
- "http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
-
- "v8/third_party/python_26":
- "http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
- }
-}
-
-hooks = [
- {
- # A change to a .gyp, .gypi, or to GYP itself should run the generator.
- "pattern": ".",
- "action": ["python", "v8/build/gyp_v8"],
- },
-]
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 0d825c0795..277c1f786d 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -137,12 +137,6 @@ ENVFILE = $(OUTDIR)/environment
# Target definitions. "all" is the default.
all: $(MODES)
-# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
-# having been created before.
-buildbot:
- $(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
- builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
-
# Compile targets. MODES and ARCHES are convenience targets.
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@@ -228,7 +222,6 @@ $(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT
GYP_GENERATORS=make \
CC="${ANDROID_TOOL_PREFIX}-gcc" \
- CXX="${ANDROID_TOOL_PREFIX}-g++" \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.android $(GYPFLAGS)
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index ebce7ff892..34d0efc5ff 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
'library:shared': {
- 'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
+ 'CPPDEFINES': ['V8_SHARED'],
'LIBS': ['pthread']
}
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
'library:shared': {
- 'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
+ 'CPPDEFINES': ['V8_SHARED']
}
},
'os:freebsd': {
@@ -1601,17 +1601,4 @@ except:
pass
-def WarnAboutDeprecation():
- print """
-#######################################################
-# WARNING: Building V8 with SCons is deprecated and #
-# will not work much longer. Please switch to using #
-# the GYP-based build now. Instructions are at #
-# http://code.google.com/p/v8/wiki/BuildingWithGYP. #
-#######################################################
- """
-
-WarnAboutDeprecation()
-import atexit
-atexit.register(WarnAboutDeprecation)
Build()
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 2d8dc11b70..45195f12d1 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -110,117 +110,151 @@
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
- ['v8_target_arch=="arm"', {
- 'defines': [
- 'V8_TARGET_ARCH_ARM',
- ],
+ ['OS!="mac"', {
+ # TODO(mark): The OS!="mac" conditional is temporary. It can be
+ # removed once the Mac Chromium build stops setting target_arch to
+ # ia32 and instead sets it to mac. Other checks in this file for
+ # OS=="mac" can be removed at that time as well. This can be cleaned
+ # up once http://crbug.com/44205 is fixed.
'conditions': [
- [ 'v8_can_use_unaligned_accesses=="true"', {
+ ['v8_target_arch=="arm"', {
'defines': [
- 'CAN_USE_UNALIGNED_ACCESSES=1',
+ 'V8_TARGET_ARCH_ARM',
],
- }],
- [ 'v8_can_use_unaligned_accesses=="false"', {
- 'defines': [
- 'CAN_USE_UNALIGNED_ACCESSES=0',
+ 'conditions': [
+ [ 'v8_can_use_unaligned_accesses=="true"', {
+ 'defines': [
+ 'CAN_USE_UNALIGNED_ACCESSES=1',
+ ],
+ }],
+ [ 'v8_can_use_unaligned_accesses=="false"', {
+ 'defines': [
+ 'CAN_USE_UNALIGNED_ACCESSES=0',
+ ],
+ }],
+ [ 'v8_can_use_vfp_instructions=="true"', {
+ 'defines': [
+ 'CAN_USE_VFP_INSTRUCTIONS',
+ ],
+ }],
+ [ 'v8_use_arm_eabi_hardfloat=="true"', {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=1',
+ 'CAN_USE_VFP_INSTRUCTIONS',
+ ],
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': ['-mfloat-abi=hard',],
+ }],
+ ],
+ }, {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=0',
+ ],
+ }],
+ # The ARM assembler assumes the host is 32 bits,
+ # so force building 32-bit host tools.
+ ['host_arch=="x64" or OS=="android"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'cflags': ['-m32'],
+ 'ldflags': ['-m32'],
+ }],
+ ],
+ }],
],
}],
- [ 'v8_can_use_vfp_instructions=="true"', {
+ ['v8_target_arch=="ia32"', {
'defines': [
- 'CAN_USE_VFP_INSTRUCTIONS',
+ 'V8_TARGET_ARCH_IA32',
],
}],
- [ 'v8_use_arm_eabi_hardfloat=="true"', {
- 'defines': [
- 'USE_EABI_HARDFLOAT=1',
- 'CAN_USE_VFP_INSTRUCTIONS',
- ],
- 'target_conditions': [
- ['_toolset=="target"', {
- 'cflags': ['-mfloat-abi=hard',],
- }],
- ],
- }, {
+ ['v8_target_arch=="mips"', {
'defines': [
- 'USE_EABI_HARDFLOAT=0',
+ 'V8_TARGET_ARCH_MIPS',
],
- }],
- ],
- }], # v8_target_arch=="arm"
- ['v8_target_arch=="ia32"', {
- 'defines': [
- 'V8_TARGET_ARCH_IA32',
- ],
- }], # v8_target_arch=="ia32"
- ['v8_target_arch=="mips"', {
- 'defines': [
- 'V8_TARGET_ARCH_MIPS',
- ],
- 'variables': {
- 'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
- },
- 'conditions': [
- ['mipscompiler=="yes"', {
- 'target_conditions': [
- ['_toolset=="target"', {
- 'cflags': ['-EL'],
- 'ldflags': ['-EL'],
- 'conditions': [
- [ 'v8_use_mips_abi_hardfloat=="true"', {
- 'cflags': ['-mhard-float'],
- 'ldflags': ['-mhard-float'],
- }, {
- 'cflags': ['-msoft-float'],
- 'ldflags': ['-msoft-float'],
- }],
- ['mips_arch_variant=="mips32r2"', {
- 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ 'variables': {
+ 'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
+ },
+ 'conditions': [
+ ['mipscompiler=="yes"', {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': ['-EL'],
+ 'ldflags': ['-EL'],
+ 'conditions': [
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'cflags': ['-mhard-float'],
+ 'ldflags': ['-mhard-float'],
+ }, {
+ 'cflags': ['-msoft-float'],
+ 'ldflags': ['-msoft-float'],
+ }],
+ ['mips_arch_variant=="mips32r2"', {
+ 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'cflags': ['-mips3', '-Wa,-mips3'],
+ }, {
+ 'cflags': ['-mips32', '-Wa,-mips32'],
+ }],
+ ],
}],
- ['mips_arch_variant=="loongson"', {
- 'cflags': ['-mips3', '-Wa,-mips3'],
- }, {
- 'cflags': ['-mips32', '-Wa,-mips32'],
+ ],
+ }],
+ [ 'v8_can_use_fpu_instructions=="true"', {
+ 'defines': [
+ 'CAN_USE_FPU_INSTRUCTIONS',
+ ],
+ }],
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'defines': [
+ '__mips_hard_float=1',
+ 'CAN_USE_FPU_INSTRUCTIONS',
+ ],
+ }, {
+ 'defines': [
+ '__mips_soft_float=1'
+ ],
+ }],
+ ['mips_arch_variant=="mips32r2"', {
+ 'defines': ['_MIPS_ARCH_MIPS32R2',],
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'defines': ['_MIPS_ARCH_LOONGSON',],
+ }],
+ # The MIPS assembler assumes the host is 32 bits,
+ # so force building 32-bit host tools.
+ ['host_arch=="x64"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'cflags': ['-m32'],
+ 'ldflags': ['-m32'],
}],
],
}],
],
}],
- [ 'v8_can_use_fpu_instructions=="true"', {
- 'defines': [
- 'CAN_USE_FPU_INSTRUCTIONS',
- ],
- }],
- [ 'v8_use_mips_abi_hardfloat=="true"', {
- 'defines': [
- '__mips_hard_float=1',
- 'CAN_USE_FPU_INSTRUCTIONS',
- ],
- }, {
+ ['v8_target_arch=="x64"', {
'defines': [
- '__mips_soft_float=1'
+ 'V8_TARGET_ARCH_X64',
],
}],
- ['mips_arch_variant=="mips32r2"', {
- 'defines': ['_MIPS_ARCH_MIPS32R2',],
+ ],
+ }, { # Section for OS=="mac".
+ 'conditions': [
+ ['target_arch=="ia32"', {
+ 'xcode_settings': {
+ 'ARCHS': ['i386'],
+ }
}],
- ['mips_arch_variant=="loongson"', {
- 'defines': ['_MIPS_ARCH_LOONGSON',],
+ ['target_arch=="x64"', {
+ 'xcode_settings': {
+ 'ARCHS': ['x86_64'],
+ }
}],
],
- }], # v8_target_arch=="mips"
- ['v8_target_arch=="x64"', {
- 'defines': [
- 'V8_TARGET_ARCH_X64',
- ],
- 'xcode_settings': {
- 'ARCHS': [ 'x86_64' ],
- },
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'StackReserveSize': '2097152',
- },
- },
- }], # v8_target_arch=="x64"
+ }],
['v8_use_liveobjectlist=="true"', {
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
@@ -238,10 +272,6 @@
'defines': [
'WIN32',
],
- 'msvs_configuration_attributes': {
- 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
- 'CharacterSet': '1',
- },
}],
['OS=="win" and v8_enable_prof==1', {
'msvs_settings': {
@@ -253,48 +283,21 @@
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'conditions': [
- [ 'v8_no_strict_aliasing==1', {
- 'cflags': [ '-fno-strict-aliasing' ],
- }],
- ], # conditions
- }],
- ['OS=="solaris"', {
- 'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
- }],
- ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
- or OS=="netbsd" or OS=="mac" or OS=="android") and \
- (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
- v8_target_arch=="mips")', {
- # Check whether the host compiler and target compiler support the
- # '-m32' option and set it if so.
- 'target_conditions': [
- ['_toolset=="host"', {
+ [ 'v8_target_arch!="x64"', {
+ # Pass -m32 to the compiler iff it understands the flag.
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!((echo | $(echo ${CXX:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
- 'xcode_settings': {
- 'ARCHS': [ 'i386' ],
- },
}],
- ['_toolset=="target"', {
- 'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
- },
- 'cflags': [ '<(m32flag)' ],
- 'ldflags': [ '<(m32flag)' ],
- 'xcode_settings': {
- 'ARCHS': [ 'i386' ],
- },
+ [ 'v8_no_strict_aliasing==1', {
+ 'cflags': [ '-fno-strict-aliasing' ],
}],
- ],
- }],
- ['OS=="freebsd" or OS=="openbsd"', {
- 'cflags': [ '-I/usr/local/include' ],
+ ], # conditions
}],
- ['OS=="netbsd"', {
- 'cflags': [ '-I/usr/pkg/include' ],
+ ['OS=="solaris"', {
+ 'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
], # conditions
'configurations': {
@@ -319,11 +322,21 @@
},
'VCLinkerTool': {
'LinkIncremental': '2',
+ # For future reference, the stack size needs to be increased
+ # when building for Windows 64-bit, otherwise some test cases
+ # can cause stack overflow.
+ # 'StackReserveSize': '297152',
},
},
'conditions': [
+ ['OS=="freebsd" or OS=="openbsd"', {
+ 'cflags': [ '-I/usr/local/include' ],
+ }],
+ ['OS=="netbsd"', {
+ 'cflags': [ '-I/usr/pkg/include' ],
+ }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
- 'cflags': [ '-Wno-unused-parameter',
+ 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@@ -351,6 +364,12 @@
}],
],
}],
+ ['OS=="freebsd" or OS=="openbsd"', {
+ 'cflags': [ '-I/usr/local/include' ],
+ }],
+ ['OS=="netbsd"', {
+ 'cflags': [ '-I/usr/pkg/include' ],
+ }],
['OS=="mac"', {
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '3', # -O3
@@ -363,6 +382,11 @@
},
}], # OS=="mac"
['OS=="win"', {
+ 'msvs_configuration_attributes': {
+ 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
+ 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
+ 'CharacterSet': '1',
+ },
'msvs_settings': {
'VCCLCompilerTool': {
'Optimization': '2',
@@ -383,7 +407,12 @@
'VCLinkerTool': {
'LinkIncremental': '1',
'OptimizeReferences': '2',
+ 'OptimizeForWindows98': '1',
'EnableCOMDATFolding': '2',
+ # For future reference, the stack size needs to be
+ # increased when building for Windows 64-bit, otherwise
+ # some test cases can cause stack overflow.
+ # 'StackReserveSize': '297152',
},
},
}], # OS=="win"
diff --git a/deps/v8/build/gyp_v8 b/deps/v8/build/gyp_v8
index 345f777d79..a926fe8ca3 100755
--- a/deps/v8/build/gyp_v8
+++ b/deps/v8/build/gyp_v8
@@ -38,11 +38,6 @@ import sys
script_dir = os.path.dirname(__file__)
v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
-if __name__ == '__main__':
- os.chdir(v8_root)
- script_dir = os.path.dirname(__file__)
- v8_root = '.'
-
sys.path.insert(0, os.path.join(v8_root, 'tools'))
import utils
@@ -98,7 +93,7 @@ def additional_include_files(args=[]):
result.append(path)
# Always include standalone.gypi
- AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))
+ AddInclude(os.path.join(script_dir, 'standalone.gypi'))
# Optionally add supplemental .gypi files if present.
supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@@ -140,10 +135,7 @@ if __name__ == '__main__':
# path separators even on Windows due to the use of shlex.split().
args.extend(shlex.split(gyp_file))
else:
- # Note that this must not start with "./" or things break.
- # So we rely on having done os.chdir(v8_root) above and use the
- # relative path.
- args.append(os.path.join('build', 'all.gyp'))
+ args.append(os.path.join(script_dir, 'all.gyp'))
args.extend(['-I' + i for i in additional_include_files(args)])
@@ -164,6 +156,28 @@ if __name__ == '__main__':
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
+ target_arch = None
+ for p in gyp_args:
+ if p.find('-Dtarget_arch=') == 0:
+ target_arch = p
+ if target_arch is None:
+ gyp_args.append('-Dtarget_arch=ia32')
if utils.GuessOS() == 'linux':
- gyp_args.append('--generator-output=out')
+ gyp_args.append('-S.ia32')
run_gyp(gyp_args)
+
+ if utils.GuessOS() == 'linux':
+ gyp_args = list(args)
+ gyp_args.append('-Dtarget_arch=x64')
+ gyp_args.append('-S.x64')
+ run_gyp(gyp_args)
+
+ gyp_args = list(args)
+ gyp_args.append('-Dv8_target_arch=arm')
+ gyp_args.append('-S.arm')
+ run_gyp(gyp_args)
+
+ gyp_args = list(args)
+ gyp_args.append('-Dv8_target_arch=mips')
+ gyp_args.append('-S.mips')
+ run_gyp(gyp_args)
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index ebdf557230..dad05ae962 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -37,9 +37,8 @@
'variables': {
'variables': {
'conditions': [
- ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
- OS=="netbsd" or OS=="mac"', {
- # This handles the Unix platforms we generally deal with.
+ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
+ # This handles the Linux platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
@@ -47,8 +46,7 @@
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
}, {
- # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
- # OS!="netbsd" and OS!="mac"
+ # OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
'host_arch%': 'ia32',
}],
],
@@ -171,9 +169,6 @@
},
}], # OS=="win"
['OS=="mac"', {
- 'xcode_settings': {
- 'SYMROOT': '<(DEPTH)/xcodebuild',
- },
'target_defaults': {
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
@@ -193,7 +188,6 @@
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
'MACOSX_DEPLOYMENT_TARGET': '10.4', # -mmacosx-version-min=10.4
'PREBINDING': 'NO', # No -Wl,-prebind
- 'SYMROOT': '<(DEPTH)/xcodebuild',
'USE_HEADERMAP': 'NO',
'OTHER_CFLAGS': [
'-fno-strict-aliasing',
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index d31ef5405c..9024531992 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -62,13 +62,11 @@
#else // _WIN32
-// Setup for Linux shared library export.
+// Setup for Linux shared library export. There is no need to distinguish
+// between building or using the V8 shared library, but we should not
+// export symbols when we are building a static library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#ifdef BUILDING_V8_SHARED
#define V8EXPORT __attribute__ ((visibility("default")))
-#else
-#define V8EXPORT
-#endif
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
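[Annotation: the v8.h hunk above restores the simpler 3.10 rule: under GCC 4+ with V8_SHARED defined, V8EXPORT always expands to default visibility, with no separate BUILDING_V8_SHARED case. An illustrative sketch of what the macro buys a client compiled with -fvisibility=hidden (not part of this patch):

    // With -fvisibility=hidden, only V8EXPORT-marked declarations remain
    // visible across the shared-library boundary.
    #define V8EXPORT __attribute__ ((visibility("default")))

    class V8EXPORT PublicApi {    // exported from libv8.so
     public:
      void Use();
    };

    class InternalHelper {        // hidden at link time
     public:
      void Run();
    };
]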
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 2482b379ac..0d0b5357d5 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -68,7 +68,6 @@ SOURCES = {
diy-fp.cc
dtoa.cc
elements.cc
- elements-kind.cc
execution.cc
factory.cc
flags.cc
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 74886f0085..0bc93c2ff2 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -5040,7 +5040,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!self->HasFastObjectElements()) {
+ if (!self->HasFastElements()) {
return Local<Object>();
}
i::FixedArray* elms = i::FixedArray::cast(self->elements());
@@ -6045,6 +6045,13 @@ int HeapGraphNode::GetSelfSize() const {
}
+int HeapGraphNode::GetRetainedSize() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
+ return ToInternal(this)->retained_size();
+}
+
+
int HeapGraphNode::GetChildrenCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
@@ -6056,7 +6063,29 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
- ToInternal(this)->children()[index]);
+ &ToInternal(this)->children()[index]);
+}
+
+
+int HeapGraphNode::GetRetainersCount() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
+ return ToInternal(this)->retainers().length();
+}
+
+
+const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
+ return reinterpret_cast<const HeapGraphEdge*>(
+ ToInternal(this)->retainers()[index]);
+}
+
+
+const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
+ return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
}
@@ -6128,7 +6157,7 @@ const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
int HeapSnapshot::GetNodesCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
- return ToInternal(this)->entries().length();
+ return ToInternal(this)->entries()->length();
}
@@ -6136,7 +6165,7 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
return reinterpret_cast<const HeapGraphNode*>(
- &ToInternal(this)->entries().at(index));
+ ToInternal(this)->entries()->at(index));
}
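[Annotation: the api.cc hunks above re-add the retainer/dominator surface of the old heap-profiler API (GetRetainedSize, GetRetainersCount, GetRetainer, GetDominatorNode). A hedged usage sketch against the 3.10-era v8-profiler.h; it assumes a snapshot was already taken via v8::HeapProfiler::TakeSnapshot() elsewhere:

    #include <cstdio>
    #include "v8-profiler.h"  // 3.10-era profiler API restored above

    // Walk a snapshot node's retainers and report sizes.
    void DumpRetainers(const v8::HeapGraphNode* node) {
      for (int i = 0; i < node->GetRetainersCount(); ++i) {
        const v8::HeapGraphEdge* edge = node->GetRetainer(i);
        const v8::HeapGraphNode* from = edge->GetFromNode();
        std::printf("retainer %d: self=%d retained=%d\n",
                    i, from->GetSelfSize(), from->GetRetainedSize());
      }
    }
]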
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 05e5e72e8b..3ad57f4657 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
v8::internal::Object* NeanderObject::get(int offset) {
- ASSERT(value()->HasFastObjectElements());
+ ASSERT(value()->HasFastElements());
return v8::internal::FixedArray::cast(value()->elements())->get(offset);
}
void NeanderObject::set(int offset, v8::internal::Object* value) {
- ASSERT(value_->HasFastObjectElements());
+ ASSERT(value_->HasFastElements());
v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
}
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 578bd810d4..c99e778a7f 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -208,8 +208,7 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
+ __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
@@ -441,10 +440,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(call_generic_code);
__ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// r3: JSArray
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
r2,
r9,
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 2296490d9d..ad2ab7e09d 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -4824,32 +4824,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
+ const int kRegExpExecuteArguments = 8;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
- // Argument 9 (sp[20]): Pass current isolate address.
+ // Argument 8 (sp[16]): Pass current isolate address.
__ mov(r0, Operand(ExternalReference::isolate_address()));
- __ str(r0, MemOperand(sp, 5 * kPointerSize));
+ __ str(r0, MemOperand(sp, 4 * kPointerSize));
- // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
+ // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 4 * kPointerSize));
+ __ str(r0, MemOperand(sp, 3 * kPointerSize));
- // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
+ // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r0, Operand(0));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
@@ -4898,9 +4893,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ cmp(r0, Operand(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
+ __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
@@ -7102,8 +7095,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
@@ -7366,9 +7359,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(r2, r5, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiElements(r2, r5, &fast_elements);
+ __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
// Store into the array literal requires a elements transition. Call into
// the runtime.
@@ -7380,7 +7373,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(r5, r4);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -7391,8 +7384,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index e00afb9035..befd8f2de7 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 3e7a1e9d0e..96139a2597 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -125,8 +125,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 2a5887a953..3c8df292c4 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -1701,7 +1701,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+ bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1722,7 +1722,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1750,7 +1751,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (IsFastObjectElementsKind(constant_elements_kind)) {
+ if (constant_elements_kind == FAST_ELEMENTS) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index fd93480986..c88c257092 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
__ b(ne, &non_double_value);
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(address());
}
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
Address cmp_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1729,31 +1729,34 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
- // This is patching a conditional "jump if not smi/jump if smi" site.
- // Enabling by changing from
- // cmp rx, rx
- // b eq/ne, <target>
- // to
- // tst rx, #kSmiTagMask
- // b ne/eq, <target>
- // and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- if (check == ENABLE_INLINED_SMI_CHECK) {
- ASSERT(Assembler::IsCmpRegister(instr_at_patch));
- ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
- Assembler::GetRm(instr_at_patch).code());
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- } else {
- ASSERT(check == DISABLE_INLINED_SMI_CHECK);
- ASSERT(Assembler::IsTstImmediate(instr_at_patch));
- patcher.masm()->cmp(reg, reg);
- }
+ ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+ ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+ Assembler::GetRm(instr_at_patch).code());
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
+ // This is patching a "jump if not smi" site to be active.
+ // Changing
+ // cmp rx, rx
+ // b eq, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b ne, <target>
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRn(instr_at_patch);
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
+ // This is patching a "jump if smi" site to be active.
+ // Changing
+ // cmp rx, rx
+ // b ne, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b eq, <target>
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRn(instr_at_patch);
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(eq);
}
}
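[Annotation: most of the remaining churn in this commit is the same rename applied per architecture: 3.11's packed/holey elements-kind lattice (FAST_SMI_ELEMENTS, FAST_HOLEY_*) goes back to 3.10's smaller set. A rough map of the fast kinds the hunks above switch between; the names come from the diff, but the ordering comments are a sketch, not V8's headers:

    // 3.10-era fast elements kinds restored by this revert; transitions
    // only generalize, top to bottom.
    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS,  // every element is a Smi
      FAST_ELEMENTS,           // arbitrary tagged values
      FAST_DOUBLE_ELEMENTS,    // unboxed doubles
      DICTIONARY_ELEMENTS      // slow hash-table backing store
    };

    // e.g. storing a HeapNumber into a FAST_SMI_ONLY_ELEMENTS array moves it
    // to FAST_DOUBLE_ELEMENTS; storing any other object moves it to
    // FAST_ELEMENTS (see the KeyedStoreIC::GenerateGeneric hunk above).
]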
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index c97831aa13..5c60f5321c 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -2082,9 +2082,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index dbae81310c..ec8aac8036 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -1236,7 +1236,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1253,13 +1252,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@@ -1273,7 +1272,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1742,7 +1740,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1765,7 +1762,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -1810,7 +1806,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index fd4b3e8408..79b56fc077 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -2587,38 +2587,42 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
-
int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- return;
- }
Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ cmp(scratch, Operand(map));
- if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- } else {
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ mov(r2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
Label next;
+ __ cmp(scratch, Operand(map));
__ b(ne, &next);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ b(&done);
__ bind(&next);
}
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ __ cmp(scratch, Operand(map));
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ b(ne, &generic);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ b(&done);
+ __ bind(&generic);
+ __ mov(r2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(ne, instr->environment());
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
}
- if (need_generic) {
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
}
@@ -2696,10 +2700,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ ubfx(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(GetInitialFastElementsKind()));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ b(le, &done);
+ __ cmp(scratch, Operand(FAST_ELEMENTS));
+ __ b(eq, &done);
__ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(lt, &fail);
__ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2746,9 +2748,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- uint32_t offset = FixedArray::kHeaderSize +
- (instr->additional_index() << kPointerSizeLog2);
- __ ldr(result, FieldMemOperand(scratch, offset));
+ __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2780,21 +2780,18 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) << shift_size) +
+ ? Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << shift_size)));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
__ vldr(result, elements, 0);
}
@@ -2816,33 +2813,26 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
- ? Operand(constant_key << shift_size)
+ ? Operand(constant_key * (1 << shift_size))
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), additional_offset);
+ __ vldr(result.low(), scratch0(), 0);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
+ __ vldr(result, scratch0(), 0);
}
} else {
Register result = ToRegister(instr->result());
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- (constant_key << shift_size) + additional_offset)
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+ : MemOperand(external_pointer, key, LSL, shift_size));
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@@ -2870,12 +2860,9 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3743,16 +3730,10 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
- (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
- + FixedArray::kHeaderSize;
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ str(value, FieldMemOperand(elements, offset));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- if (instr->additional_index() != 0) {
- __ add(scratch,
- scratch,
- Operand(instr->additional_index() << kPointerSizeLog2));
- }
__ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
@@ -3794,7 +3775,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
Operand operand = key_is_constant
- ? Operand((constant_key << shift_size) +
+ ? Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@@ -3812,7 +3793,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
vs);
}
- __ vstr(value, scratch, instr->additional_index() << shift_size);
+ __ vstr(value, scratch, 0);
}
@@ -3833,33 +3814,25 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant ? Operand(constant_key << shift_size)
+ Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vstr(double_scratch0().low(), scratch0(), 0);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, scratch0(), additional_offset);
+ __ vstr(value, scratch0(), 0);
}
} else {
Register value(ToRegister(instr->value()));
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- ((constant_key + instr->additional_index())
- << shift_size))
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+ : MemOperand(external_pointer, key, LSL, shift_size));
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3878,10 +3851,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3918,22 +3888,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
__ mov(new_map_reg, Operand(to_map));
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
@@ -4707,9 +4675,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -4860,11 +4827,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // Deopt if the literal boilerplate ElementsKind is of a type different than
+ // the expected one. The check isn't necessary if the boilerplate has already
+ // been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
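
The hunks above fold the expanded FAST_HOLEY_* lattice back into three fast element kinds. A minimal sketch of the widening order those hunks rely on — the enum values mirror the STATIC_ASSERTs later in this patch, but the helper itself is illustrative, not code from the tree:

enum SimpleFastKind {
  FAST_SMI_ONLY_ELEMENTS = 0,  // every element is a Smi
  FAST_ELEMENTS = 1,           // arbitrary tagged values
  FAST_DOUBLE_ELEMENTS = 2     // unboxed doubles
};

// True when 'from' -> 'to' only widens, matching the three cases kept in
// DoTransitionElementsKind above: smi->object, smi->double, double->object.
static bool IsWideningTransition(SimpleFastKind from, SimpleFastKind to) {
  if (from == FAST_SMI_ONLY_ELEMENTS) return to != FAST_SMI_ONLY_ELEMENTS;
  if (from == FAST_DOUBLE_ELEMENTS) return to == FAST_ELEMENTS;
  return false;  // FAST_ELEMENTS is already the most general fast kind
}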
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index b4aec54555..42c9961b3e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1868,12 +1868,10 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
@@ -1881,25 +1879,22 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(ls, fail);
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(hi, fail);
}
@@ -2002,17 +1997,22 @@ void MacroAssembler::CompareMap(Register obj,
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
cmp(scratch, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
- if (!current_map) break;
- b(eq, early_success);
- cmp(scratch, Operand(Handle<Map>(current_map)));
- }
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ b(eq, early_success);
+ cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ b(eq, early_success);
+ cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
}
}
}
@@ -2865,38 +2865,28 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
- ldr(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, scratch);
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(map_in_out, FieldMemOperand(scratch, offset));
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
+ Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
map_out,
scratch,
&done);
@@ -3748,7 +3738,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
 // The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
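
CheckFastElements and friends above work because Map::bit_field2 stores the ElementsKind in its upper bits, so a kind test reduces to one unsigned compare. A hedged sketch — the shift is an assumed value for illustration, not V8's actual field layout:

#include <cstdint>

static const int kAssumedKindShift = 5;  // assumption, not V8's real shift
static const uint8_t kMaxFastElementValue =
    static_cast<uint8_t>(((1 /* FAST_ELEMENTS */ + 1) << kAssumedKindShift) - 1);

// Mirrors the cmp + b(hi, fail) pair in CheckFastElements: any encoding of a
// kind greater than FAST_ELEMENTS fails the range check.
static bool HasFastElements(uint8_t bit_field2) {
  return bit_field2 <= kMaxFastElementValue;
}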
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index b93aba1f51..360f4c128c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -512,8 +512,7 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out,
- bool can_have_holes);
+ Register map_out);
void LoadGlobalFunction(int index, Register function);
@@ -803,9 +802,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
+ void CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 11790e5183..a833624ceb 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,49 +43,45 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
- * - r4 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
* - r5 : Pointer to current code object (Code*) including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : Points to tip of backtrack stack
+ * - r8 : points to tip of backtrack stack
* - r9 : Unused, might be used by C code and expected unchanged.
* - r10 : End of input (points to byte after last character in input).
* - r11 : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : Points to tip of C stack.
+ * - r13/sp : points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[56] Isolate* isolate (address of the current isolate)
- * - fp[52] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[48] stack_area_base (high end of the memory area to use as
- * backtracking stack).
- * - fp[44] capture array size (may fit multiple sets of matches)
+ * - fp[52] Isolate* isolate (Address of the current isolate)
+ * - fp[48] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[44] stack_area_base (High end of the memory area to use as
+ * backtracking stack).
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
- * - fp[32] return address (lr).
- * - fp[28] old frame pointer (r11).
+ * - fp[32] return address (lr).
+ * - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
- * - fp[-4] end of input (address of end of string).
- * - fp[-8] start of input (address of first character in string).
+ * - fp[-4] end of input (Address of end of string).
+ * - fp[-8] start of input (Address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] success counter (only for global regexps to count matches).
- * - fp[-24] Offset of location before start of input (effectively character
+ * - fp[-20] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
- * - fp[-28] At start (if 1, we are starting at the start of the
+ * - fp[-24] At start (if 1, we are starting at the start of the
* string, otherwise 0)
- * - fp[-32] register 0 (Only positions must be stored in the first
+ * - fp[-28] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -201,9 +197,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(ne, &not_at_start);
+ BranchOrBacktrack(eq, &not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -216,9 +212,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(ne, on_not_at_start);
+ BranchOrBacktrack(eq, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
__ add(r0, end_of_input_address(), Operand(current_input_offset()));
@@ -659,7 +655,6 @@ void RegExpMacroAssemblerARM::Fail() {
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
- Label return_r0;
// Finalize code - write the entry point code now we know how many
// registers we need.
@@ -683,9 +678,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
+ __ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@@ -704,13 +698,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
+ __ jmp(&exit_label_);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &return_r0);
+ __ b(ne, &exit_label_);
__ bind(&stack_ok);
@@ -731,46 +725,42 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Initialize code pointer register
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand(0, RelocInfo::NONE));
- __ b(ne, &load_char_start_regexp);
- __ mov(current_character(), Operand('\n'), LeaveCC, eq);
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ // Determine whether the start index is zero, that is at the start of the
+ // string, and store that value in a local variable.
+ __ cmp(r1, Operand(0));
+ __ mov(r1, Operand(1), LeaveCC, eq);
+ __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ str(r1, MemOperand(frame_pointer(), kAtStart));
- // Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
- if (num_saved_registers_ > 8) {
- // Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
- __ mov(r2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(ne, &init_loop);
- } else {
- for (int i = 0; i < num_saved_registers_; i++) {
- __ str(r0, register_location(i));
- }
- }
+
+ // Address of register 0.
+ __ add(r1, frame_pointer(), Operand(kRegisterZero));
+ __ mov(r2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(ne, &init_loop);
}
// Initialize backtrack stack pointer.
__ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
+ // Initialize code pointer register
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+ // Load previous char as initial value of current character register.
+ Label at_start;
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ b(ne, &at_start);
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
+ __ jmp(&start_label_);
+ __ bind(&at_start);
+ __ mov(current_character(), Operand('\n'));
__ jmp(&start_label_);
+
// Exit code:
if (success_label_.is_linked()) {
// Save captures when successful.
@@ -796,10 +786,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ ldr(r2, register_location(i));
__ ldr(r3, register_location(i + 1));
- if (global()) {
- // Keep capture start in r4 for the zero-length check later.
- __ mov(r4, r2);
- }
if (mode_ == UC16) {
__ add(r2, r1, Operand(r2, ASR, 1));
__ add(r3, r1, Operand(r3, ASR, 1));
@@ -811,54 +797,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
}
}
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
- // Increment success counter.
- __ add(r0, r0, Operand(1));
- __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ sub(r1, r1, Operand(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(r1, Operand(num_saved_registers_));
- __ b(lt, &return_r0);
-
- __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- // Advance the location for output.
- __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
- __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
-
- // Prepare r0 to initialize registers with its value in the next run.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Special case for zero-length matches.
- // r4: capture start index
- __ cmp(current_input_offset(), r4);
- // Not a zero-length match, restart.
- __ b(ne, &load_char_start_regexp);
- // Offset from the end is zero if we already reached the end.
- __ cmp(current_input_offset(), Operand(0));
- __ b(eq, &exit_label_);
- // Advance current position after a zero-length match.
- __ add(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- __ b(&load_char_start_regexp);
- } else {
- __ mov(r0, Operand(SUCCESS));
- }
+ __ mov(r0, Operand(SUCCESS));
}
-
// Exit and return r0
__ bind(&exit_label_);
- if (global()) {
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- }
-
- __ bind(&return_r0);
   // Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@@ -880,7 +822,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returning non-zero, we should end execution with the given
// result as return value.
- __ b(ne, &return_r0);
+ __ b(ne, &exit_label_);
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -917,7 +859,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
+ __ jmp(&exit_label_);
}
CodeDesc code_desc;
@@ -1072,9 +1014,8 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
}
-bool RegExpMacroAssemblerARM::Succeed() {
+void RegExpMacroAssemblerARM::Succeed() {
__ jmp(&success_label_);
- return global();
}
@@ -1366,9 +1307,8 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
- // r4 is not being used to store the capture start index at this point.
- __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
- offset = r4;
+ __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = r0;
}
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
// and the operating system running on the target allow it.
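
The prologue restored above materializes an "at start" flag once and spills it to fp[-24] (kAtStart), so CheckAtStart/CheckNotAtStart test a stored value instead of re-deriving it from the start index. An illustrative C-level equivalent of that computation (assumed semantics, not code from the patch):

// Matches the cmp / mov(eq) / mov(ne) triple in GetCode above.
static int ComputeAtStartFlag(int start_index) {
  return start_index == 0 ? 1 : 0;
}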
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index f2d5c55694..14f984f567 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -113,7 +113,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
+ virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@@ -137,8 +137,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -150,10 +149,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kInputStartMinusOne = kInputString - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
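
A small sanity sketch, assuming kPointerSize == 4 on ARM: the reverted constants above reproduce the frame layout documented in the comment block of regexp-macro-assembler-arm.cc earlier in this patch.

const int kPointerSize = 4;
const int kInputString = -16;                                 // fp[-16]
const int kInputStartMinusOne = kInputString - kPointerSize;  // fp[-20]
const int kAtStart = kInputStartMinusOne - kPointerSize;      // fp[-24]
const int kRegisterZero = kAtStart - kPointerSize;            // fp[-28]
static_assert(kRegisterZero == -28, "register 0 sits just below the locals");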
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index d1cad15bd0..585f1e0176 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,16 +49,16 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, int, Address, int, Isolate*);
+ void*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -401,9 +401,9 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+ entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index a024d79800..49c0982301 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1581,29 +1581,16 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r7, &call_builtin);
+ __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
     // receiver: the array being pushed onto
// r3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
r3,
r7,
- &try_holey_map);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- r3,
- r7,
&call_builtin);
__ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
@@ -3385,11 +3372,8 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3513,11 +3497,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3857,11 +3838,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3924,11 +3902,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4067,11 +4042,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4253,7 +4225,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
@@ -4281,7 +4253,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4291,7 +4263,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
} else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
+ ASSERT(elements_kind == FAST_ELEMENTS);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index a20f87b122..c65c68c2d7 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -484,8 +484,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
global_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
- object_function_map->set_instance_descriptors(
- heap->empty_descriptor_array());
+ object_function_map->
+ set_instance_descriptors(heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -516,10 +516,12 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
function_instance_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
- Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
- empty_function_map->set_prototype(
- global_context()->object_function()->prototype());
- empty_function->set_map(*empty_function_map);
+ Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
+ function_without_prototype_map);
+ empty_fm->set_instance_descriptors(
+ function_without_prototype_map->instance_descriptors());
+ empty_fm->set_prototype(global_context()->object_function()->prototype());
+ empty_function->set_map(*empty_fm);
return empty_function;
}
@@ -1092,7 +1094,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
- ASSERT(result->HasFastObjectElements());
+ ASSERT(result->HasFastElements());
#endif
}
@@ -1185,7 +1187,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
- ASSERT(result->HasFastObjectElements());
+ ASSERT(result->HasFastElements());
#endif
}
@@ -1635,7 +1637,7 @@ bool Genesis::InstallNatives() {
array_function->initial_map()->CopyDropTransitions();
Map* new_map;
if (!maybe_map->To<Map>(&new_map)) return false;
- new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
+ new_map->set_elements_kind(FAST_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances.
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 64ec3d9fcc..84a0c3d19c 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -200,12 +200,9 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) {
Context* global_context = isolate->context()->global_context();
- if (array->GetElementsKind() == GetInitialFastElementsKind() &&
- !global_context->js_array_maps()->IsUndefined()) {
- FixedArray* map_array =
- FixedArray::cast(global_context->js_array_maps());
- array->set_map(Map::cast(map_array->
- get(TERMINAL_FAST_ELEMENTS_KIND)));
+ if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
+ !global_context->object_js_array_map()->IsUndefined()) {
+ array->set_map(Map::cast(global_context->object_js_array_map()));
}
}
} else {
@@ -225,13 +222,6 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
}
- ElementsKind elements_kind = array->GetElementsKind();
- if (!IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe_array =
- array->TransitionElementsKind(elements_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
// We do not use SetContent to skip the unnecessary elements type check.
array->set_elements(FixedArray::cast(fixed_array));
array->set_length(Smi::cast(obj));
@@ -260,7 +250,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Allocate an appropriately typed elements array.
MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
number_of_elements);
} else {
@@ -271,15 +261,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Fill in the content
switch (array->GetElementsKind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
+ case FAST_SMI_ONLY_ELEMENTS: {
FixedArray* smi_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
}
break;
}
- case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -289,7 +277,6 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
}
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
@@ -425,7 +412,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
HeapObject* elms = array->elements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
- if (args == NULL || array->HasFastObjectElements()) return elms;
+ if (args == NULL || array->HasFastElements()) return elms;
if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition =
@@ -435,7 +422,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
- if (args == NULL || array->HasFastObjectElements() ||
+ if (args == NULL || array->HasFastElements() ||
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
@@ -529,8 +516,8 @@ BUILTIN(ArrayPush) {
}
FixedArray* new_elms = FixedArray::cast(obj);
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
+ new_elms, FAST_ELEMENTS, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@@ -601,7 +588,7 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
+ ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@@ -643,7 +630,7 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
+ ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -665,8 +652,8 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
+ new_elms, FAST_ELEMENTS, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
@@ -695,7 +682,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastSmiOrObjectElements() ||
+ if (!array->HasFastTypeElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -711,7 +698,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
- && JSObject::cast(receiver)->HasFastSmiOrObjectElements();
+ && JSObject::cast(receiver)->HasFastTypeElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -776,9 +763,9 @@ BUILTIN(ArraySlice) {
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
- CopyObjectToObjectElements(elms, elements_kind, k,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
FixedArray::cast(result_array->elements()),
- elements_kind, 0, result_len);
+ FAST_ELEMENTS, 0, result_len);
return result_array;
}
@@ -799,7 +786,7 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
+ ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
@@ -850,9 +837,9 @@ BUILTIN(ArraySplice) {
{
// Fill newly created array.
- CopyObjectToObjectElements(elms, elements_kind, actual_start,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
FixedArray::cast(result_array->elements()),
- elements_kind, 0, actual_delete_count);
+ FAST_ELEMENTS, 0, actual_delete_count);
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
@@ -901,13 +888,12 @@ BUILTIN(ArraySplice) {
{
// Copy the part before actual_start as is.
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0,
- new_elms, kind, 0, actual_start);
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
+ new_elms, FAST_ELEMENTS, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
- CopyObjectToObjectElements(elms, kind,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS,
actual_start + actual_delete_count,
- new_elms, kind,
+ new_elms, FAST_ELEMENTS,
actual_start + item_count, to_copy);
}
@@ -954,12 +940,11 @@ BUILTIN(ArrayConcat) {
// and calculating total length.
int n_arguments = args.length();
int result_len = 0;
- ElementsKind elements_kind = GetInitialFastElementsKind();
+ ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
- if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
- JSArray::cast(arg)->GetPrototype() != array_proto) {
+ if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
+ || JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -976,18 +961,8 @@ BUILTIN(ArrayConcat) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- if (!JSArray::cast(arg)->HasFastSmiElements()) {
- if (IsFastSmiElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- }
- }
-
- if (JSArray::cast(arg)->HasFastHoleyElements()) {
- elements_kind = GetHoleyElementsKind(elements_kind);
+ if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
+ elements_kind = FAST_ELEMENTS;
}
}
@@ -1007,8 +982,8 @@ BUILTIN(ArrayConcat) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
- CopyObjectToObjectElements(elms, elements_kind, 0,
- result_elms, elements_kind,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
+ result_elms, FAST_ELEMENTS,
start_pos, len);
start_pos += len;
}
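
ArrayConcat above reverts to a two-outcome kind computation: start from FAST_SMI_ONLY_ELEMENTS and widen to FAST_ELEMENTS as soon as any input can hold non-Smi values. An illustrative restatement with stand-in types (HasOnlySmis substitutes for V8's JSArray::HasFastSmiOnlyElements):

#include <vector>

enum ConcatKind { SMI_ONLY, OBJECT };

struct ArrayLike {
  bool has_only_smis;
  bool HasOnlySmis() const { return has_only_smis; }
};

static ConcatKind ComputeConcatKind(const std::vector<ArrayLike>& args) {
  ConcatKind kind = SMI_ONLY;
  for (const ArrayLike& arg : args) {
    if (!arg.HasOnlySmis()) kind = OBJECT;  // widen once, never narrow
  }
  return kind;
}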
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 8f316606c2..814e358721 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -262,13 +262,10 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
break;
case EXTERNAL_BYTE_ELEMENTS:
@@ -295,9 +292,7 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS: {
+ case FAST_SMI_ONLY_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_,
@@ -305,7 +300,6 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
}
break;
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_,
grow_mode_);
@@ -436,32 +430,24 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail;
- ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
- if (IsFastSmiOrObjectElementsKind(to_)) {
- if (IsFastSmiOrObjectElementsKind(from_)) {
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm);
- } else if (IsFastDoubleElementsKind(from_)) {
- ASSERT(!IsFastSmiElementsKind(to_));
+ if (to_ == FAST_ELEMENTS) {
+ if (from_ == FAST_SMI_ONLY_ELEMENTS) {
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+ } else if (from_ == FAST_DOUBLE_ELEMENTS) {
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
} else {
UNREACHABLE();
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_,
- to_,
+ FAST_ELEMENTS,
grow_mode_);
- } else if (IsFastSmiElementsKind(from_) &&
- IsFastDoubleElementsKind(to_)) {
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_jsarray_,
grow_mode_);
- } else if (IsFastDoubleElementsKind(from_)) {
- ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm);
} else {
UNREACHABLE();
}
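
The if/else chain in ElementsTransitionAndStoreStub::Generate above leaves exactly three legal from/to pairs. A compact restatement of that dispatch, illustrative only:

enum Kind { SMI_ONLY, OBJECT, DOUBLE };

// 0 = GenerateSmiOnlyToObject, 1 = GenerateDoubleToObject,
// 2 = GenerateSmiOnlyToDouble, -1 = UNREACHABLE in the stub.
static int PickTransitionGenerator(Kind from, Kind to) {
  if (to == OBJECT) {
    if (from == SMI_ONLY) return 0;
    if (from == DOUBLE) return 1;
    return -1;
  }
  if (from == SMI_ONLY && to == DOUBLE) return 2;
  return -1;
}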
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 5c8717838f..b67e961ac7 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -498,7 +498,6 @@ class ICCompareStub: public CodeStub {
virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(state_);
- code->set_compare_operation(op_);
}
virtual CodeStub::Major MajorKey() { return CompareIC; }
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 08a777f2ad..50d70f265d 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
- static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
- static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
+ static void GenerateSmiOnlyToObject(MacroAssembler* masm);
+ static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
private:
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index d154b82ca0..647c15c153 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -106,7 +106,9 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
- V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
+ V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
+ V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
+ V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -246,7 +248,9 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
- JS_ARRAY_MAPS_INDEX,
+ SMI_JS_ARRAY_MAP_INDEX,
+ DOUBLE_JS_ARRAY_MAP_INDEX,
+ OBJECT_JS_ARRAY_MAP_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
@@ -369,6 +373,18 @@ class Context: public FixedArray {
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
+ static int GetContextMapIndexFromElementsKind(
+ ElementsKind elements_kind) {
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
+ } else if (elements_kind == FAST_ELEMENTS) {
+ return Context::OBJECT_JS_ARRAY_MAP_INDEX;
+ } else {
+ ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+ return Context::SMI_JS_ARRAY_MAP_INDEX;
+ }
+ }
+
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \
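
A hedged usage sketch of the helper added to Context above, pairing it with the existing Context::SlotOffset that the ARM loads earlier in this patch already use (assumes V8's internal headers; illustrative only):

static int ArrayMapSlotOffset(ElementsKind elements_kind) {
  int index = Context::GetContextMapIndexFromElementsKind(elements_kind);
  return Context::SlotOffset(index);  // byte offset into the global context
}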
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 3f0b2245f4..26d0bc10e1 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -26,8 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Defined when linking against shared lib on Windows.
-#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
+#ifdef USING_V8_SHARED // Defined when linking against shared lib on Windows.
#define V8_SHARED
#endif
@@ -316,8 +315,8 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
}
-const char kArrayBufferMarkerPropName[] = "_is_array_buffer_";
-const char kArrayBufferReferencePropName[] = "_array_buffer_ref_";
+const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
+const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
static const int kExternalArrayAllocationHeaderSize = 2;
@@ -354,11 +353,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
Local<Value> length_value = (args.Length() < 3)
? (first_arg_is_array_buffer
- ? args[0]->ToObject()->Get(String::New("byteLength"))
+ ? args[0]->ToObject()->Get(String::New("length"))
: args[0])
: args[2];
- size_t byteLength = convertToUint(length_value, &try_catch);
- size_t length = byteLength;
+ size_t length = convertToUint(length_value, &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
void* data = NULL;
@@ -370,7 +368,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
data = derived_from->GetIndexedPropertiesExternalArrayData();
size_t array_buffer_length = convertToUint(
- derived_from->Get(String::New("byteLength")),
+ derived_from->Get(String::New("length")),
&try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
@@ -453,20 +451,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
array->SetIndexedPropertiesToExternalArrayData(
reinterpret_cast<uint8_t*>(data) + offset, type,
static_cast<int>(length));
- array->Set(String::New("byteLength"),
- Int32::New(static_cast<int32_t>(byteLength)), ReadOnly);
- if (!is_array_buffer_construct) {
- array->Set(String::New("length"),
- Int32::New(static_cast<int32_t>(length)), ReadOnly);
- array->Set(String::New("byteOffset"),
- Int32::New(static_cast<int32_t>(offset)), ReadOnly);
- array->Set(String::New("BYTES_PER_ELEMENT"),
- Int32::New(static_cast<int32_t>(element_size)));
- // We currently support 'buffer' property only if constructed from a buffer.
- if (first_arg_is_array_buffer) {
- array->Set(String::New("buffer"), args[0], ReadOnly);
- }
- }
+ array->Set(String::New("length"),
+ Int32::New(static_cast<int32_t>(length)), ReadOnly);
+ array->Set(String::New("BYTES_PER_ELEMENT"),
+ Int32::New(static_cast<int32_t>(element_size)));
return array;
}
@@ -834,8 +822,8 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
- global_template->Set(String::New("readbuffer"),
- FunctionTemplate::New(ReadBuffer));
+ global_template->Set(String::New("readbinary"),
+ FunctionTemplate::New(ReadBinary));
global_template->Set(String::New("readline"),
FunctionTemplate::New(ReadLine));
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@@ -1054,29 +1042,20 @@ static char* ReadChars(const char* name, int* size_out) {
}
-Handle<Value> Shell::ReadBuffer(const Arguments& args) {
+Handle<Value> Shell::ReadBinary(const Arguments& args) {
String::Utf8Value filename(args[0]);
- int length;
+ int size;
if (*filename == NULL) {
return ThrowException(String::New("Error loading file"));
}
- char* data = ReadChars(*filename, &length);
- if (data == NULL) {
+ char* chars = ReadChars(*filename, &size);
+ if (chars == NULL) {
return ThrowException(String::New("Error reading file"));
}
-
- Handle<Object> buffer = Object::New();
- buffer->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
-
- Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
- persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
- persistent_buffer.MarkIndependent();
-
- buffer->SetIndexedPropertiesToExternalArrayData(
- reinterpret_cast<uint8_t*>(data), kExternalUnsignedByteArray, length);
- buffer->Set(String::New("byteLength"),
- Int32::New(static_cast<int32_t>(length)), ReadOnly);
- return buffer;
+ // We skip checking the string for UTF8 characters and use it raw as
+ // backing store for the external string with 8-bit characters.
+ BinaryResource* resource = new BinaryResource(chars, size);
+ return String::NewExternal(resource);
}
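
ReadBinary above hands the raw file bytes to V8 as an external string. A minimal sketch of the BinaryResource shape that code assumes — the real class lives in d8.h; this version is illustrative:

class BinaryResource : public v8::String::ExternalAsciiStringResource {
 public:
  BinaryResource(const char* data, size_t length)
      : data_(data), length_(length) {}
  virtual ~BinaryResource() { delete[] data_; }  // owns the buffer
  virtual const char* data() const { return data_; }
  virtual size_t length() const { return length_; }
 private:
  const char* data_;
  size_t length_;
};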
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index b315086fcf..c872f90958 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -307,7 +307,7 @@ class Shell : public i::AllStatic {
static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
- static Handle<Value> ReadBuffer(const Arguments& args);
+ static Handle<Value> ReadBinary(const Arguments& args);
static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) {
return ReadFromStdin();
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index 9a81979f9d..511663d8ee 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -247,7 +247,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
while (!(c == '\n' && prev_c == '\r')) {
prev_c = c;
received = conn->Receive(&c, 1);
- if (received == 0) {
+ if (received <= 0) {
PrintF("Error %d\n", Socket::LastError());
return SmartArrayPointer<char>();
}
@@ -323,41 +323,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
const char* embedding_host) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer.
+ bool ok;
int len;
- int r;
// Send the header.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Type: connect\r\n");
- r = conn->Send(buffer, len);
- if (r != len) return false;
+ ok = conn->Send(buffer, len);
+ if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"V8-Version: %s\r\n", v8::V8::GetVersion());
- r = conn->Send(buffer, len);
- if (r != len) return false;
+ ok = conn->Send(buffer, len);
+ if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Protocol-Version: 1\r\n");
- r = conn->Send(buffer, len);
- if (r != len) return false;
+ ok = conn->Send(buffer, len);
+ if (!ok) return false;
if (embedding_host != NULL) {
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Embedding-Host: %s\r\n", embedding_host);
- r = conn->Send(buffer, len);
- if (r != len) return false;
+ ok = conn->Send(buffer, len);
+ if (!ok) return false;
}
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"%s: 0\r\n", kContentLength);
- r = conn->Send(buffer, len);
- if (r != len) return false;
+ ok = conn->Send(buffer, len);
+ if (!ok) return false;
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- r = conn->Send(buffer, len);
- if (r != len) return false;
+ ok = conn->Send(buffer, len);
+ if (!ok) return false;
// No body for connect message.
@@ -454,7 +454,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
int total_received = 0;
while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received);
- if (received == 0) {
+ if (received <= 0) {
return total_received;
}
total_received += received;
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 9efb5c37aa..99256ba21a 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -892,16 +892,6 @@ void Debug::Iterate(ObjectVisitor* v) {
}
-void Debug::PutValuesOnStackAndDie(int start,
- Address c_entry_fp,
- Address last_fp,
- Address larger_fp,
- int count,
- int end) {
- OS::Abort();
-}
-
-
Object* Debug::Break(Arguments args) {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
@@ -994,34 +984,11 @@ Object* Debug::Break(Arguments args) {
// Count frames until target frame
int count = 0;
JavaScriptFrameIterator it(isolate_);
- while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
+ while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) {
count++;
it.Advance();
}
- // Catch the cases that would lead to crashes and capture
- // - C entry FP at which to start stack crawl.
- // - FP of the frame at which we plan to stop stepping out (last FP).
- // - current FP that's larger than last FP.
- // - Counter for the number of steps to step out.
- if (it.done()) {
- // We crawled the entire stack, never reaching last_fp_.
- PutValuesOnStackAndDie(0xBEEEEEEE,
- frame->fp(),
- thread_local_.last_fp_,
- NULL,
- count,
- 0xFEEEEEEE);
- } else if (it.frame()->fp() != thread_local_.last_fp_) {
- // We crawled over last_fp_, without getting a match.
- PutValuesOnStackAndDie(0xBEEEEEEE,
- frame->fp(),
- thread_local_.last_fp_,
- it.frame()->fp(),
- count,
- 0xFEEEEEEE);
- }
-
// If we found original frame
if (it.frame()->fp() == thread_local_.last_fp_) {
if (step_count > 1) {
@@ -2260,13 +2227,6 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
}
-const int Debug::FramePaddingLayout::kInitialSize = 1;
-
-
-// Any even value bigger than kInitialSize as needed for stack scanning.
-const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
-
-
bool Debug::IsDebugGlobal(GlobalObject* global) {
return IsLoaded() && global == debug_context()->global();
}
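A note on the comparison reverted in Debug::Break above: stacks grow downward
on V8's targets, so walking from the newest JavaScript frame toward older ones
sees strictly increasing fp values. That is what distinguishes the two loop
conditions:

  // Sketch of the two walk conditions:
  //   fp() <  last_fp_   stops at the first frame at-or-beyond last_fp_,
  //                      tolerating the case where no exact match exists;
  //   fp() != last_fp_   requires an exact match and, when last_fp_ is no
  //                      longer on the stack, walks until it.done().
  // The deleted PutValuesOnStackAndDie existed precisely to capture that
  // no-exact-match case in crash dumps before aborting.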
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index d9c966c37f..7ec78015c6 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -232,12 +232,6 @@ class Debug {
void PreemptionWhileInDebugger();
void Iterate(ObjectVisitor* v);
- NO_INLINE(void PutValuesOnStackAndDie(int start,
- Address c_entry_fp,
- Address last_fp,
- Address larger_fp,
- int count,
- int end));
Object* Break(Arguments args);
void SetBreakPoint(Handle<SharedFunctionInfo> shared,
Handle<Object> break_point_object,
@@ -463,50 +457,6 @@ class Debug {
// Architecture-specific constant.
static const bool kFrameDropperSupported;
- /**
- * Defines layout of a stack frame that supports padding. This is a regular
- * internal frame that has a flexible stack structure. LiveEdit can shift
- * its lower part up the stack, taking up the 'padding' space when additional
- * stack memory is required.
- * Such frame is expected immediately above the topmost JavaScript frame.
- *
- * Stack Layout:
- * --- Top
- * LiveEdit routine frames
- * ---
- * C frames of debug handler
- * ---
- * ...
- * ---
- * An internal frame that has n padding words:
- * - any number of words as needed by code -- upper part of frame
- * - padding size: a Smi storing n -- current size of padding
- * - padding: n words filled with kPaddingValue in form of Smi
- * - 3 context/type words of a regular InternalFrame
- * - fp
- * ---
- * Topmost JavaScript frame
- * ---
- * ...
- * --- Bottom
- */
- class FramePaddingLayout : public AllStatic {
- public:
- // Architecture-specific constant.
- static const bool kIsSupported;
-
- // A size of frame base including fp. Padding words starts right above
- // the base.
- static const int kFrameBaseSize = 4;
-
- // A number of words that should be reserved on stack for the LiveEdit use.
- // Normally equals 1. Stored on stack in form of Smi.
- static const int kInitialSize;
- // A value that padding words are filled with (in form of Smi). Going
- // bottom-top, the first word not having this value is a counter word.
- static const int kPaddingValue;
- };
-
private:
explicit Debug(Isolate* isolate);
~Debug();
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
deleted file mode 100644
index 655a23bf1e..0000000000
--- a/deps/v8/src/elements-kind.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "elements-kind.h"
-
-#include "api.h"
-#include "elements.h"
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-void PrintElementsKind(FILE* out, ElementsKind kind) {
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- PrintF(out, "%s", accessor->name());
-}
-
-
-ElementsKind GetInitialFastElementsKind() {
- if (FLAG_packed_arrays) {
- return FAST_SMI_ELEMENTS;
- } else {
- return FAST_HOLEY_SMI_ELEMENTS;
- }
-}
-
-
-struct InitializeFastElementsKindSequence {
- static void Construct(
- ElementsKind** fast_elements_kind_sequence_ptr) {
- ElementsKind* fast_elements_kind_sequence =
- new ElementsKind[kFastElementsKindCount];
- *fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
- STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
- fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
- fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
- fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
- fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
- fast_elements_kind_sequence[4] = FAST_ELEMENTS;
- fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
- }
-};
-
-
-static LazyInstance<ElementsKind*,
- InitializeFastElementsKindSequence>::type
- fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
-
-
-ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
- ASSERT(sequence_number >= 0 &&
- sequence_number < kFastElementsKindCount);
- return fast_elements_kind_sequence.Get()[sequence_number];
-}
-
-int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
- for (int i = 0; i < kFastElementsKindCount; ++i) {
- if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
- return i;
- }
- }
- UNREACHABLE();
- return 0;
-}
-
-
-ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
- bool allow_only_packed) {
- ASSERT(IsFastElementsKind(elements_kind));
- ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
- while (true) {
- int index =
- GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
- elements_kind = GetFastElementsKindFromSequenceIndex(index);
- if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
- return elements_kind;
- }
- }
- UNREACHABLE();
- return TERMINAL_FAST_ELEMENTS_KIND;
-}
-
-
-bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
- ElementsKind to_kind) {
- switch (from_kind) {
- case FAST_SMI_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS;
- case FAST_HOLEY_SMI_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS &&
- to_kind != FAST_HOLEY_SMI_ELEMENTS;
- case FAST_DOUBLE_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS &&
- to_kind != FAST_HOLEY_SMI_ELEMENTS &&
- to_kind != FAST_DOUBLE_ELEMENTS;
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return to_kind == FAST_ELEMENTS ||
- to_kind == FAST_HOLEY_ELEMENTS;
- case FAST_ELEMENTS:
- return to_kind == FAST_HOLEY_ELEMENTS;
- case FAST_HOLEY_ELEMENTS:
- return false;
- default:
- return false;
- }
-}
-
-
-} } // namespace v8::internal
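The file deleted above encoded the generalization order of the fast kinds as a
linear sequence: packed Smi, holey Smi, packed double, holey double, packed
object, holey object. How its helpers composed is easiest to see in a short
sketch that uses only functions from the deleted elements-kind pair:

  // Walk the whole sequence: each step asks for the next more general kind
  // until the terminal (holey object) kind is reached.
  ElementsKind kind = GetInitialFastElementsKind();  // packed Smi when
                                                     // --packed_arrays is on
  while (IsTransitionableFastElementsKind(kind)) {
    kind = GetNextMoreGeneralFastElementsKind(kind, false);
  }
  ASSERT(kind == TERMINAL_FAST_ELEMENTS_KIND);       // FAST_HOLEY_ELEMENTS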
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
deleted file mode 100644
index ab31a33ee7..0000000000
--- a/deps/v8/src/elements-kind.h
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ELEMENTS_KIND_H_
-#define V8_ELEMENTS_KIND_H_
-
-#include "v8checks.h"
-
-namespace v8 {
-namespace internal {
-
-enum ElementsKind {
- // The "fast" kind for elements that only contain SMI values. Must be first
- // to make it possible to efficiently check maps for this kind.
- FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
-
- // The "fast" kind for tagged values. Must be second to make it possible to
- // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
- // together at once.
- FAST_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
-
- // The "fast" kind for unwrapped, non-tagged double values.
- FAST_DOUBLE_ELEMENTS,
- FAST_HOLEY_DOUBLE_ELEMENTS,
-
- // The "slow" kind.
- DICTIONARY_ELEMENTS,
- NON_STRICT_ARGUMENTS_ELEMENTS,
- // The "fast" kind for external arrays
- EXTERNAL_BYTE_ELEMENTS,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
- EXTERNAL_SHORT_ELEMENTS,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
- EXTERNAL_INT_ELEMENTS,
- EXTERNAL_UNSIGNED_INT_ELEMENTS,
- EXTERNAL_FLOAT_ELEMENTS,
- EXTERNAL_DOUBLE_ELEMENTS,
- EXTERNAL_PIXEL_ELEMENTS,
-
- // Derived constants from ElementsKind
- FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
- LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
- FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
- LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
- FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
- LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
- TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
-};
-
-const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
-const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
- FIRST_FAST_ELEMENTS_KIND + 1;
-
-void PrintElementsKind(FILE* out, ElementsKind kind);
-
-ElementsKind GetInitialFastElementsKind();
-
-ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
-
-int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
-
-
-inline bool IsFastElementsKind(ElementsKind kind) {
- ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
- return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
-}
-
-
-inline bool IsFastDoubleElementsKind(ElementsKind kind) {
- return kind == FAST_DOUBLE_ELEMENTS ||
- kind == FAST_HOLEY_DOUBLE_ELEMENTS;
-}
-
-
-inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_HOLEY_SMI_ELEMENTS ||
- kind == FAST_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
-}
-
-
-inline bool IsFastSmiElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_HOLEY_SMI_ELEMENTS;
-}
-
-
-inline bool IsFastObjectElementsKind(ElementsKind kind) {
- return kind == FAST_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
-}
-
-
-inline bool IsFastHoleyElementsKind(ElementsKind kind) {
- return kind == FAST_HOLEY_SMI_ELEMENTS ||
- kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
-}
-
-
-inline bool IsHoleyElementsKind(ElementsKind kind) {
- return IsFastHoleyElementsKind(kind) ||
- kind == DICTIONARY_ELEMENTS;
-}
-
-
-inline bool IsFastPackedElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_DOUBLE_ELEMENTS ||
- kind == FAST_ELEMENTS;
-}
-
-
-inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
- if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
- return FAST_SMI_ELEMENTS;
- }
- if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
- return FAST_DOUBLE_ELEMENTS;
- }
- if (holey_kind == FAST_HOLEY_ELEMENTS) {
- return FAST_ELEMENTS;
- }
- return holey_kind;
-}
-
-
-inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
- if (packed_kind == FAST_SMI_ELEMENTS) {
- return FAST_HOLEY_SMI_ELEMENTS;
- }
- if (packed_kind == FAST_DOUBLE_ELEMENTS) {
- return FAST_HOLEY_DOUBLE_ELEMENTS;
- }
- if (packed_kind == FAST_ELEMENTS) {
- return FAST_HOLEY_ELEMENTS;
- }
- return packed_kind;
-}
-
-
-inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
- ASSERT(IsFastSmiElementsKind(from_kind));
- return (from_kind == FAST_SMI_ELEMENTS)
- ? FAST_ELEMENTS
- : FAST_HOLEY_ELEMENTS;
-}
-
-
-inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
- ElementsKind to_kind) {
- return (GetHoleyElementsKind(from_kind) == to_kind) ||
- (IsFastSmiElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind));
-}
-
-
-bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
- ElementsKind to_kind);
-
-
-inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
- return IsFastElementsKind(from_kind) &&
- from_kind != TERMINAL_FAST_ELEMENTS_KIND;
-}
-
-
-ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
- bool allow_only_packed);
-
-
-inline bool CanTransitionToMoreGeneralFastElementsKind(
- ElementsKind elements_kind,
- bool allow_only_packed) {
- return IsFastElementsKind(elements_kind) &&
- (elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
- (!allow_only_packed || elements_kind != FAST_ELEMENTS));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ELEMENTS_KIND_H_
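One detail of the deleted enum worth spelling out: its comments insist the Smi
kinds come first and the object kinds second because kind classification then
collapses to a single unsigned range check rather than a chain of equality
tests. A sketch of the trick, under the enum layout above:

  // With FAST_SMI_ELEMENTS == 0 and the smi/object kinds contiguous, one
  // comparison covers all four "smi or object" kinds (values 0..3).
  inline bool HasFastSmiOrObjectKindSketch(ElementsKind kind) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    return static_cast<unsigned>(kind) <= FAST_HOLEY_ELEMENTS;
  }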
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 2692cb5384..26d3dc135c 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -39,14 +39,8 @@
// Inheritance hierarchy:
// - ElementsAccessorBase (abstract)
// - FastElementsAccessor (abstract)
-// - FastSmiOrObjectElementsAccessor
-// - FastPackedSmiElementsAccessor
-// - FastHoleySmiElementsAccessor
-// - FastPackedObjectElementsAccessor
-// - FastHoleyObjectElementsAccessor
+// - FastObjectElementsAccessor
// - FastDoubleElementsAccessor
-// - FastPackedDoubleElementsAccessor
-// - FastHoleyDoubleElementsAccessor
// - ExternalElementsAccessor (abstract)
// - ExternalByteElementsAccessor
// - ExternalUnsignedByteElementsAccessor
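The hierarchy sketched in this comment is a curiously-recurring-template
design: ElementsAccessorBase's virtual entry points forward to static *Impl
methods on the concrete subclass, so each elements kind gets statically bound
element access out of one shared implementation. A minimal illustration of the
shape (not the real class, which carries many more entry points):

  // CRTP sketch: the base resolves Subclass::GetImpl at compile time.
  template <typename Subclass, typename Traits>
  class AccessorBaseSketch {
   public:
    typedef typename Traits::BackingStore Store;
    MaybeObject* Get(Object* receiver, JSObject* holder, uint32_t key,
                     FixedArrayBase* store) {
      return Subclass::GetImpl(receiver, holder, key, Store::cast(store));
    }
  };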
@@ -71,15 +65,9 @@ namespace internal {
// identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
- V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
- V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \
- FixedArray) \
- V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
- V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
- V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \
- FixedDoubleArray) \
- V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
- FixedDoubleArray) \
+ V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \
+ V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
+ V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
@@ -151,6 +139,8 @@ void CopyObjectToObjectElements(FixedArray* from,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map());
+ ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
+ ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -158,7 +148,7 @@ void CopyObjectToObjectElements(FixedArray* from,
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
- // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
+ // FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -170,15 +160,12 @@ void CopyObjectToObjectElements(FixedArray* from,
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
- ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
- if (IsFastObjectElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
+ if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
Heap* heap = from->GetHeap();
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
@@ -203,7 +190,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
#ifdef DEBUG
- // Fast object arrays cannot be uninitialized. Ensure they are already
+ // FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -213,7 +200,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
#endif
}
ASSERT(to != from);
- ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
+ ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
if (copy_size == 0) return;
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
@@ -229,7 +216,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
to->set_the_hole(i + to_start);
}
}
- if (IsFastObjectElementsKind(to_kind)) {
+ if (to_kind == FAST_ELEMENTS) {
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
@@ -247,7 +234,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
- ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
+ ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -255,7 +242,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
- // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
+ // FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -268,14 +255,14 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
- if (IsFastSmiElementsKind(to_kind)) {
+ if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
MaybeObject* maybe_value = from->get(i + from_start);
Object* value;
- ASSERT(IsFastObjectElementsKind(to_kind));
- // Because Double -> Object elements transitions allocate HeapObjects
+ ASSERT(to_kind == FAST_ELEMENTS);
+ // Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS transitions allocate HeapObjects
// iteratively, the allocate must succeed within a single GC cycle,
// otherwise the retry after the GC will also fail. In order to ensure
// that no GC is triggered, allocate HeapNumbers from old space if they
@@ -417,38 +404,6 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual ElementsKind kind() const { return ElementsTraits::Kind; }
- static void ValidateContents(JSObject* holder, int length) {
- }
-
- static void ValidateImpl(JSObject* holder) {
- FixedArrayBase* fixed_array_base = holder->elements();
- // When objects are first allocated, its elements are Failures.
- if (fixed_array_base->IsFailure()) return;
- if (!fixed_array_base->IsHeapObject()) return;
- Map* map = fixed_array_base->map();
- // Arrays that have been shifted in place can't be verified.
- Heap* heap = holder->GetHeap();
- if (map == heap->raw_unchecked_one_pointer_filler_map() ||
- map == heap->raw_unchecked_two_pointer_filler_map() ||
- map == heap->free_space_map()) {
- return;
- }
- int length = 0;
- if (holder->IsJSArray()) {
- Object* length_obj = JSArray::cast(holder)->length();
- if (length_obj->IsSmi()) {
- length = Smi::cast(length_obj)->value();
- }
- } else {
- length = fixed_array_base->length();
- }
- ElementsAccessorSubclass::ValidateContents(holder, length);
- }
-
- virtual void Validate(JSObject* holder) {
- ElementsAccessorSubclass::ValidateImpl(holder);
- }
-
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
@@ -469,10 +424,10 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, BackingStore::cast(backing_store));
}
- MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
+ virtual MaybeObject* Get(Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -480,65 +435,62 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, BackingStore::cast(backing_store));
}
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- BackingStore* backing_store) {
+ static MaybeObject* GetImpl(Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ BackingStore* backing_store) {
return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
? backing_store->get(key)
: backing_store->GetHeap()->the_hole_value();
}
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
- Object* length) {
+ virtual MaybeObject* SetLength(JSArray* array,
+ Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
array, length, BackingStore::cast(array->elements()));
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- BackingStore* backing_store);
+ static MaybeObject* SetLengthImpl(JSObject* obj,
+ Object* length,
+ BackingStore* backing_store);
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
- JSArray* array,
- int capacity,
- int length) {
+ virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+ int capacity,
+ int length) {
return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
array,
capacity,
length);
}
- MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
- JSObject* obj,
- int capacity,
- int length) {
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ int capacity,
+ int length) {
UNIMPLEMENTED();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
+ virtual MaybeObject* Delete(JSObject* obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) = 0;
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind to_kind,
- uint32_t to_start,
- int copy_size) {
+ static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+ uint32_t from_start,
+ FixedArrayBase* to,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int copy_size) {
UNREACHABLE();
return NULL;
}
- MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind to_kind,
- uint32_t to_start,
- int copy_size,
- FixedArrayBase* from) {
+ virtual MaybeObject* CopyElements(JSObject* from_holder,
+ uint32_t from_start,
+ FixedArrayBase* to,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int copy_size,
+ FixedArrayBase* from) {
if (from == NULL) {
from = from_holder->elements();
}
@@ -549,11 +501,10 @@ class ElementsAccessorBase : public ElementsAccessor {
from, from_start, to, to_kind, to_start, copy_size);
}
- MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
- Object* receiver,
- JSObject* holder,
- FixedArray* to,
- FixedArrayBase* from) {
+ virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
+ JSObject* holder,
+ FixedArray* to,
+ FixedArrayBase* from) {
int len0 = to->length();
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
@@ -669,7 +620,6 @@ class FastElementsAccessor
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
- friend class NonStrictArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
@@ -680,21 +630,10 @@ class FastElementsAccessor
Object* length_object,
uint32_t length) {
uint32_t old_capacity = backing_store->length();
- Object* old_length = array->length();
- bool same_size = old_length->IsSmi() &&
- static_cast<uint32_t>(Smi::cast(old_length)->value()) == length;
- ElementsKind kind = array->GetElementsKind();
-
- if (!same_size && IsFastElementsKind(kind) &&
- !IsFastHoleyElementsKind(kind)) {
- kind = GetHoleyElementsKind(kind);
- MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
// Check whether the backing store should be shrunk.
if (length <= old_capacity) {
- if (array->HasFastSmiOrObjectElements()) {
+ if (array->HasFastTypeElements()) {
MaybeObject* maybe_obj = array->EnsureWritableFastElements();
if (!maybe_obj->To(&backing_store)) return maybe_obj;
}
@@ -726,40 +665,39 @@ class FastElementsAccessor
MaybeObject* result = FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length);
if (result->IsFailure()) return result;
- array->ValidateElements();
return length_object;
}
// Request conversion to slow elements.
return array->GetHeap()->undefined_value();
}
+};
+
+
+class FastObjectElementsAccessor
+ : public FastElementsAccessor<FastObjectElementsAccessor,
+ ElementsKindTraits<FAST_ELEMENTS>,
+ kPointerSize> {
+ public:
+ explicit FastObjectElementsAccessor(const char* name)
+ : FastElementsAccessor<FastObjectElementsAccessor,
+ ElementsKindTraits<FAST_ELEMENTS>,
+ kPointerSize>(name) {}
static MaybeObject* DeleteCommon(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- ASSERT(obj->HasFastSmiOrObjectElements() ||
- obj->HasFastDoubleElements() ||
+ uint32_t key) {
+ ASSERT(obj->HasFastElements() ||
+ obj->HasFastSmiOnlyElements() ||
obj->HasFastArgumentsElements());
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(obj->elements());
Heap* heap = obj->GetHeap();
+ FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
- backing_store =
- KindTraits::BackingStore::cast(
- FixedArray::cast(backing_store)->get(1));
+ backing_store = FixedArray::cast(backing_store->get(1));
} else {
- ElementsKind kind = KindTraits::Kind;
- if (IsFastPackedElementsKind(kind)) {
- MaybeObject* transitioned =
- obj->TransitionElementsKind(GetHoleyElementsKind(kind));
- if (transitioned->IsFailure()) return transitioned;
- }
- if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
- Object* writable;
- MaybeObject* maybe = obj->EnsureWritableFastElements();
- if (!maybe->ToObject(&writable)) return maybe;
- backing_store = KindTraits::BackingStore::cast(writable);
- }
+ Object* writable;
+ MaybeObject* maybe = obj->EnsureWritableFastElements();
+ if (!maybe->ToObject(&writable)) return maybe;
+ backing_store = FixedArray::cast(writable);
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
@@ -771,14 +709,15 @@ class FastElementsAccessor
// has too few used values, normalize it.
// To avoid doing the check on every delete we require at least
// one adjacent hole to the value being deleted.
+ Object* hole = heap->the_hole_value();
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
!heap->InNewSpace(backing_store) &&
- ((key > 0 && backing_store->is_the_hole(key - 1)) ||
- (key + 1 < length && backing_store->is_the_hole(key + 1)))) {
+ ((key > 0 && backing_store->get(key - 1) == hole) ||
+ (key + 1 < length && backing_store->get(key + 1) == hole))) {
int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) {
- if (!backing_store->is_the_hole(i)) ++num_used;
+ if (backing_store->get(i) != hole) ++num_used;
// Bail out early if more than 1/4 is used.
if (4 * num_used > backing_store->length()) break;
}
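The delete path above embeds a sparseness heuristic: it only scans old-space
stores, only when the deleted slot already borders a hole, and it keeps the
fast store as soon as more than a quarter of the slots are proven used.
Restated as a standalone predicate (illustrative; the surrounding code, cut
off by this hunk, normalizes to dictionary elements when this returns true):

  // Sketch of the heuristic: cheap guards first, then a scan that bails as
  // soon as the store is proven dense enough to keep. The real check also
  // requires !heap->InNewSpace(store).
  static bool ShouldNormalizeSketch(FixedArray* store, uint32_t key,
                                    uint32_t length, Object* hole) {
    const int kMinLengthForSparsenessCheck = 64;
    if (store->length() < kMinLengthForSparsenessCheck) return false;
    bool adjacent_hole =
        (key > 0 && store->get(key - 1) == hole) ||
        (key + 1 < length && store->get(key + 1) == hole);
    if (!adjacent_hole) return false;
    int num_used = 0;
    for (int i = 0; i < store->length(); ++i) {
      if (store->get(i) != hole) ++num_used;
      if (4 * num_used > store->length()) return false;  // dense: keep fast
    }
    return true;  // sparse: worth normalizing
  }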
@@ -791,75 +730,27 @@ class FastElementsAccessor
return heap->true_value();
}
- virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- return DeleteCommon(obj, key, mode);
- }
-
- static bool HasElementImpl(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- typename KindTraits::BackingStore* backing_store) {
- if (key >= static_cast<uint32_t>(backing_store->length())) {
- return false;
- }
- return !backing_store->is_the_hole(key);
- }
-
- static void ValidateContents(JSObject* holder, int length) {
-#if DEBUG
- FixedArrayBase* elements = holder->elements();
- Heap* heap = elements->GetHeap();
- Map* map = elements->map();
- ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
- (map == heap->fixed_array_map() ||
- map == heap->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(KindTraits::Kind) ==
- ((map == heap->fixed_array_map() && length == 0) ||
- map == heap->fixed_double_array_map())));
- for (int i = 0; i < length; i++) {
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
- ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
- static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
- (IsFastHoleyElementsKind(KindTraits::Kind) ==
- backing_store->is_the_hole(i)));
- }
-#endif
- }
-};
-
-
-template<typename FastElementsAccessorSubclass,
- typename KindTraits>
-class FastSmiOrObjectElementsAccessor
- : public FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits,
- kPointerSize> {
- public:
- explicit FastSmiOrObjectElementsAccessor(const char* name)
- : FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits,
- kPointerSize>(name) {}
-
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
- if (IsFastSmiOrObjectElementsKind(to_kind)) {
- CopyObjectToObjectElements(
- FixedArray::cast(from), KindTraits::Kind, from_start,
- FixedArray::cast(to), to_kind, to_start, copy_size);
- } else if (IsFastDoubleElementsKind(to_kind)) {
- CopyObjectToDoubleElements(
- FixedArray::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start, copy_size);
- } else {
- UNREACHABLE();
+ switch (to_kind) {
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS: {
+ CopyObjectToObjectElements(
+ FixedArray::cast(from), ElementsTraits::Kind, from_start,
+ FixedArray::cast(to), to_kind, to_start, copy_size);
+ return from;
+ }
+ case FAST_DOUBLE_ELEMENTS:
+ CopyObjectToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
+ return from;
+ default:
+ UNREACHABLE();
}
return to->GetHeap()->undefined_value();
}
@@ -868,85 +759,51 @@ class FastSmiOrObjectElementsAccessor
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
- JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
- obj->HasFastSmiElements()
- ? JSObject::kAllowSmiElements
- : JSObject::kDontAllowSmiElements;
+ JSObject::SetFastElementsCapacityMode set_capacity_mode =
+ obj->HasFastSmiOnlyElements()
+ ? JSObject::kAllowSmiOnlyElements
+ : JSObject::kDontAllowSmiOnlyElements;
return obj->SetFastElementsCapacityAndLength(capacity,
length,
set_capacity_mode);
}
-};
-
-
-class FastPackedSmiElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastPackedSmiElementsAccessor,
- ElementsKindTraits<FAST_SMI_ELEMENTS> > {
- public:
- explicit FastPackedSmiElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastPackedSmiElementsAccessor,
- ElementsKindTraits<FAST_SMI_ELEMENTS> >(name) {}
-};
-
-
-class FastHoleySmiElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastHoleySmiElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> > {
- public:
- explicit FastHoleySmiElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastHoleySmiElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> >(name) {}
-};
-
-
-class FastPackedObjectElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastPackedObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS> > {
- public:
- explicit FastPackedObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastPackedObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS> >(name) {}
-};
+ protected:
+ friend class FastElementsAccessor<FastObjectElementsAccessor,
+ ElementsKindTraits<FAST_ELEMENTS>,
+ kPointerSize>;
-class FastHoleyObjectElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_ELEMENTS> > {
- public:
- explicit FastHoleyObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
+ virtual MaybeObject* Delete(JSObject* obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
+ return DeleteCommon(obj, key);
+ }
};
-template<typename FastElementsAccessorSubclass,
- typename KindTraits>
class FastDoubleElementsAccessor
- : public FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits,
+ : public FastElementsAccessor<FastDoubleElementsAccessor,
+ ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize> {
public:
explicit FastDoubleElementsAccessor(const char* name)
- : FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits,
+ : FastElementsAccessor<FastDoubleElementsAccessor,
+ ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize>(name) {}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
- return obj->SetFastDoubleElementsCapacityAndLength(capacity,
- length);
+ return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
}
protected:
+ friend class ElementsAccessorBase<FastDoubleElementsAccessor,
+ ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
+ friend class FastElementsAccessor<FastDoubleElementsAccessor,
+ ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
+ kDoubleSize>;
+
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
@@ -954,15 +811,12 @@ class FastDoubleElementsAccessor
uint32_t to_start,
int copy_size) {
switch (to_kind) {
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
return CopyDoubleToObjectElements(
FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
FixedDoubleArray::cast(to),
to_start, copy_size);
@@ -972,35 +826,26 @@ class FastDoubleElementsAccessor
}
return to->GetHeap()->undefined_value();
}
-};
-
-
-class FastPackedDoubleElementsAccessor
- : public FastDoubleElementsAccessor<
- FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
- public:
- friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
- explicit FastPackedDoubleElementsAccessor(const char* name)
- : FastDoubleElementsAccessor<
- FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
-};
+ virtual MaybeObject* Delete(JSObject* obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
+ int length = obj->IsJSArray()
+ ? Smi::cast(JSArray::cast(obj)->length())->value()
+ : FixedDoubleArray::cast(obj->elements())->length();
+ if (key < static_cast<uint32_t>(length)) {
+ FixedDoubleArray::cast(obj->elements())->set_the_hole(key);
+ }
+ return obj->GetHeap()->true_value();
+ }
-class FastHoleyDoubleElementsAccessor
- : public FastDoubleElementsAccessor<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
- public:
- friend class ElementsAccessorBase<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
- explicit FastHoleyDoubleElementsAccessor(const char* name)
- : FastDoubleElementsAccessor<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
+ static bool HasElementImpl(Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedDoubleArray* backing_store) {
+ return key < static_cast<uint32_t>(backing_store->length()) &&
+ !backing_store->is_the_hole(key);
+ }
};
@@ -1021,28 +866,27 @@ class ExternalElementsAccessor
friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
ElementsKindTraits<Kind> >;
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- BackingStore* backing_store) {
+ static MaybeObject* GetImpl(Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ BackingStore* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
? backing_store->get(key)
: backing_store->GetHeap()->undefined_value();
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- BackingStore* backing_store) {
+ static MaybeObject* SetLengthImpl(JSObject* obj,
+ Object* length,
+ BackingStore* backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ virtual MaybeObject* Delete(JSObject* obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
// External arrays always ignore deletes.
return obj->GetHeap()->true_value();
}
@@ -1158,11 +1002,10 @@ class DictionaryElementsAccessor
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
- MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
- SeededNumberDictionary* dict,
- JSArray* array,
- Object* length_object,
- uint32_t length) {
+ static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
+ JSArray* array,
+ Object* length_object,
+ uint32_t length) {
if (length == 0) {
// If the length of a slow array is reset to zero, we clear
// the array and flush backing storage. This has the added
@@ -1214,10 +1057,9 @@ class DictionaryElementsAccessor
return length_object;
}
- MUST_USE_RESULT static MaybeObject* DeleteCommon(
- JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ static MaybeObject* DeleteCommon(JSObject* obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
Isolate* isolate = obj->GetIsolate();
Heap* heap = isolate->heap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
@@ -1260,23 +1102,20 @@ class DictionaryElementsAccessor
return heap->true_value();
}
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind to_kind,
- uint32_t to_start,
- int copy_size) {
+ static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+ uint32_t from_start,
+ FixedArrayBase* to,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int copy_size) {
switch (to_kind) {
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
CopyDictionaryToObjectElements(
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDictionaryToDoubleElements(
SeededNumberDictionary::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
@@ -1292,17 +1131,16 @@ class DictionaryElementsAccessor
friend class ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >;
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ virtual MaybeObject* Delete(JSObject* obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key, mode);
}
- MUST_USE_RESULT static MaybeObject* GetImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- SeededNumberDictionary* backing_store) {
+ static MaybeObject* GetImpl(Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ SeededNumberDictionary* backing_store) {
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = backing_store->ValueAt(entry);
@@ -1348,10 +1186,10 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
NonStrictArgumentsElementsAccessor,
ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
- MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArray* parameter_map) {
+ static MaybeObject* GetImpl(Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArray* parameter_map) {
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
Context* context = Context::cast(parameter_map->get(0));
@@ -1378,19 +1216,18 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArray* parameter_map) {
+ static MaybeObject* SetLengthImpl(JSObject* obj,
+ Object* length,
+ FixedArray* parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ virtual MaybeObject* Delete(JSObject* obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
FixedArray* parameter_map = FixedArray::cast(obj->elements());
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
@@ -1403,21 +1240,18 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
- // It's difficult to access the version of DeleteCommon that is declared
- // in the templatized super class, call the concrete implementation in
- // the class for the most generalized ElementsKind subclass.
- return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
+ return FastObjectElementsAccessor::DeleteCommon(obj, key);
}
}
return obj->GetHeap()->true_value();
}
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind to_kind,
- uint32_t to_start,
- int copy_size) {
+ static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+ uint32_t from_start,
+ FixedArrayBase* to,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int copy_size) {
FixedArray* parameter_map = FixedArray::cast(from);
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
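The parameter map this accessor manipulates has a fixed shape, inferred from
the accesses visible in this file (the 2 + i slot offset is an assumption
drawn from the surrounding code, which this hunk does not show):

  // Sketch of non-strict arguments lookup over the parameter map; bounds
  // checks elided:
  //   get(0)      Context holding the aliased parameter slots
  //   get(1)      FixedArray (or dictionary) of unmapped elements
  //   get(2 + i)  the hole, or a Smi context-slot index while argument i
  //               still aliases its formal parameter
  static Object* LookupSketch(FixedArray* parameter_map, uint32_t key) {
    Object* probe = parameter_map->get(static_cast<int>(key) + 2);
    if (!probe->IsTheHole()) {
      Context* context = Context::cast(parameter_map->get(0));
      return context->get(Smi::cast(probe)->value());  // aliased parameter
    }
    // Unmapped: fall through to the backing store (assumed non-dictionary).
    return FixedArray::cast(parameter_map->get(1))
        ->get(static_cast<int>(key));
  }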
@@ -1470,7 +1304,7 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
if (array->IsDictionary()) {
return elements_accessors_[DICTIONARY_ELEMENTS];
} else {
- return elements_accessors_[FAST_HOLEY_ELEMENTS];
+ return elements_accessors_[FAST_ELEMENTS];
}
case EXTERNAL_BYTE_ARRAY_TYPE:
return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
@@ -1520,8 +1354,8 @@ void ElementsAccessor::TearDown() {
template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
- ElementsKindTraits>::
+MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
+ ElementsKindTraits>::
SetLengthImpl(JSObject* obj,
Object* length,
typename ElementsKindTraits::BackingStore* backing_store) {
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 822fca50ee..51d402d341 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -28,7 +28,6 @@
#ifndef V8_ELEMENTS_H_
#define V8_ELEMENTS_H_
-#include "elements-kind.h"
#include "objects.h"
#include "heap.h"
#include "isolate.h"
@@ -46,10 +45,6 @@ class ElementsAccessor {
virtual ElementsKind kind() const = 0;
const char* name() const { return name_; }
- // Checks the elements of an object for consistency, asserting when a problem
- // is found.
- virtual void Validate(JSObject* obj) = 0;
-
// Returns true if a holder contains an element with the specified key
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
@@ -65,19 +60,18 @@ class ElementsAccessor {
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
  // backing_store is NULL, holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual MaybeObject* Get(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
+ virtual MaybeObject* Get(Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
  // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. arrays
  // that have non-deletable elements can only be shrunk to the size of the
  // highest element that is non-deletable.
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
- Object* new_length) = 0;
+ virtual MaybeObject* SetLength(JSArray* holder,
+ Object* new_length) = 0;
// Modifies both the length and capacity of a JSArray, resizing the underlying
// backing store as necessary. This method does NOT honor the semantics of
@@ -85,14 +79,14 @@ class ElementsAccessor {
// elements. This method should only be called for array expansion OR by
// runtime JavaScript code that use InternalArrays and don't care about
// EcmaScript 5.1 semantics.
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
- int capacity,
- int length) = 0;
+ virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+ int capacity,
+ int length) = 0;
// Deletes an element in an object, returning a new elements backing store.
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
+ virtual MaybeObject* Delete(JSObject* holder,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) = 0;
  // If kCopyToEnd is specified as the copy_size to CopyElements, it copies
  // all elements from source after source_start to the destination array.
@@ -107,28 +101,26 @@ class ElementsAccessor {
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
// ignored.
- MUST_USE_RESULT virtual MaybeObject* CopyElements(
- JSObject* source_holder,
- uint32_t source_start,
- FixedArrayBase* destination,
- ElementsKind destination_kind,
- uint32_t destination_start,
- int copy_size,
- FixedArrayBase* source = NULL) = 0;
-
- MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
- FixedArrayBase* to,
- ElementsKind to_kind,
- FixedArrayBase* from = NULL) {
+ virtual MaybeObject* CopyElements(JSObject* source_holder,
+ uint32_t source_start,
+ FixedArrayBase* destination,
+ ElementsKind destination_kind,
+ uint32_t destination_start,
+ int copy_size,
+ FixedArrayBase* source = NULL) = 0;
+
+ MaybeObject* CopyElements(JSObject* from_holder,
+ FixedArrayBase* to,
+ ElementsKind to_kind,
+ FixedArrayBase* from = NULL) {
return CopyElements(from_holder, 0, to, to_kind, 0,
kCopyToEndAndInitializeToHole, from);
}
- MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
- Object* receiver,
- JSObject* holder,
- FixedArray* to,
- FixedArrayBase* from = NULL) = 0;
+ virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
+ JSObject* holder,
+ FixedArray* to,
+ FixedArrayBase* from = NULL) = 0;
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
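Callers typically reach these virtuals through ForKind, dispatching once on
the holder's kind and then using the kind-agnostic interface; a short usage
sketch built only from the declarations above:

  // Sketch: fetch element `key` from `holder` through the shared accessors.
  static MaybeObject* GetElementSketch(Object* receiver, JSObject* holder,
                                       uint32_t key) {
    ElementsAccessor* accessor =
        ElementsAccessor::ForKind(holder->GetElementsKind());
    return accessor->Get(receiver, holder, key);  // may be a Failure
  }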
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 1c29ea13b2..6bb7893746 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -775,7 +775,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
instance_size != JSObject::kHeaderSize) {
Handle<Map> initial_map = NewMap(type,
instance_size,
- GetInitialFastElementsKind());
+ FAST_SMI_ONLY_ELEMENTS);
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@@ -1013,11 +1013,10 @@ void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
void Factory::EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
- uint32_t length,
EnsureElementsMode mode) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
- array->EnsureCanContainElements(*elements, length, mode));
+ array->EnsureCanContainElements(*elements, mode));
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index a999b15f21..06aad1bef6 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -216,10 +216,9 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
- Handle<Map> NewMap(
- InstanceType type,
- int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+ Handle<Map> NewMap(InstanceType type,
+ int instance_size,
+ ElementsKind elements_kind = FAST_ELEMENTS);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -270,14 +269,13 @@ class Factory {
Handle<JSModule> NewJSModule();
// JS arrays are pretenured when allocated by the parser.
- Handle<JSArray> NewJSArray(
- int capacity,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(int capacity,
+ ElementsKind elements_kind = FAST_ELEMENTS,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ ElementsKind elements_kind = FAST_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
void SetElementsCapacityAndLength(Handle<JSArray> array,
@@ -289,7 +287,6 @@ class Factory {
void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
void EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
- uint32_t length,
EnsureElementsMode mode);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index fc9a1db454..62a9782859 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -150,7 +150,6 @@ DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
// Flags for experimental implementation features.
-DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(clever_optimizations,
true,
@@ -198,8 +197,6 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
-DEFINE_bool(array_index_dehoisting, false,
- "perform array index dehoisting")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 78cdd0cedb..7178bd413a 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -211,9 +211,6 @@ class StackFrame BASE_EMBEDDED {
virtual void SetCallerFp(Address caller_fp) = 0;
- // Manually changes value of fp in this object.
- void UpdateFp(Address fp) { state_.fp = fp; }
-
Address* pc_address() const { return state_.pc_address; }
// Get the id of this stack frame.
diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h
index ccd962a982..1a57268326 100644
--- a/deps/v8/src/func-name-inferrer.h
+++ b/deps/v8/src/func-name-inferrer.h
@@ -88,8 +88,6 @@ class FuncNameInferrer : public ZoneObject {
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
- if (entries_stack_.is_empty())
- funcs_to_infer_.Clear();
}
private:
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 97b033f848..25d4ffe89b 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -345,9 +345,6 @@ F FUNCTION_CAST(Address addr) {
#define INLINE(header) inline __attribute__((always_inline)) header
#define NO_INLINE(header) __attribute__((noinline)) header
#endif
-#elif defined(_MSC_VER) && !defined(DEBUG)
-#define INLINE(header) __forceinline header
-#define NO_INLINE(header) header
#else
#define INLINE(header) inline header
#define NO_INLINE(header) header
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 9d79db2466..e12895a283 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -595,24 +595,12 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
- Object* obj = Object::cast(new_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
- ASSERT(heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
- if (obj->IsExternalAsciiString()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(obj);
- ASSERT(String::IsAscii(string->GetChars(), string->length()));
- }
+ ASSERT(heap_->InNewSpace(new_space_strings_[i]));
+ ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
- Object* obj = Object::cast(old_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
- ASSERT(!heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
- if (obj->IsExternalAsciiString()) {
- ExternalAsciiString* string = ExternalAsciiString::cast(obj);
- ASSERT(String::IsAscii(string->GetChars(), string->length()));
- }
+ ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
+ ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
}
#endif
}
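
The reverted Verify keeps only the two cheap assertions per entry; everything still compiles away outside DEBUG builds. A standalone illustration of that pattern (not V8 code):

    // Standalone illustration of debug-only verification; not V8 code.
    #include <cassert>
    #include <vector>

    void VerifyAllNonNull(const std::vector<const void*>& entries) {
    #ifdef DEBUG
      for (size_t i = 0; i < entries.size(); ++i) {
        assert(entries[i] != nullptr);  // cheap invariant, debug builds only
      }
    #else
      (void)entries;  // release builds: the whole body compiles away
    #endif
    }
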
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index a224e2bf50..e2e0e9eff3 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -171,9 +171,6 @@ Heap::Heap()
global_contexts_list_ = NULL;
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
- // Put a dummy entry in the remembered pages so we can find the list in the
- // minidump even if there are no real unmapped pages.
- RememberUnmappedPage(NULL, false);
}
@@ -808,7 +805,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
+ size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
@@ -2023,7 +2020,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_pre_allocated_property_fields(0);
map->init_instance_descriptors();
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
- map->init_prototype_transitions(undefined_value());
+ map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
@@ -2162,15 +2159,15 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
meta_map()->init_instance_descriptors();
meta_map()->set_code_cache(empty_fixed_array());
- meta_map()->init_prototype_transitions(undefined_value());
+ meta_map()->set_prototype_transitions(empty_fixed_array());
fixed_array_map()->init_instance_descriptors();
fixed_array_map()->set_code_cache(empty_fixed_array());
- fixed_array_map()->init_prototype_transitions(undefined_value());
+ fixed_array_map()->set_prototype_transitions(empty_fixed_array());
oddball_map()->init_instance_descriptors();
oddball_map()->set_code_cache(empty_fixed_array());
- oddball_map()->init_prototype_transitions(undefined_value());
+ oddball_map()->set_prototype_transitions(empty_fixed_array());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@@ -2469,7 +2466,7 @@ bool Heap::CreateApiObjects() {
// bottleneck to trap the Smi-only -> fast elements transition, and there
// appears to be no benefit in optimizing this case.
Map* new_neander_map = Map::cast(obj);
- new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
+ new_neander_map->set_elements_kind(FAST_ELEMENTS);
set_neander_map(new_neander_map);
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
@@ -3050,7 +3047,6 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
}
JSMessageObject* message = JSMessageObject::cast(result);
message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
- message->initialize_elements();
message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->set_type(type);
message->set_arguments(arguments);
@@ -3327,8 +3323,6 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
return Failure::OutOfMemoryException();
}
- ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
-
Map* map = external_ascii_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@@ -3754,7 +3748,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Check the state of the object
ASSERT(JSObject::cast(result)->HasFastProperties());
- ASSERT(JSObject::cast(result)->HasFastObjectElements());
+ ASSERT(JSObject::cast(result)->HasFastElements());
return result;
}
@@ -3799,7 +3793,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
- ASSERT(map->has_fast_object_elements());
+ ASSERT(map->has_fast_elements());
// If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object
@@ -3916,7 +3910,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
+ ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
+ JSObject::cast(obj)->HasFastElements());
return obj;
}
@@ -3961,9 +3956,6 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
ASSERT(capacity >= length);
- if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
@@ -3984,7 +3976,8 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
}
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+ ASSERT(elements_kind == FAST_ELEMENTS ||
+ elements_kind == FAST_SMI_ONLY_ELEMENTS);
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedArray(capacity);
} else {
@@ -4010,7 +4003,6 @@ MaybeObject* Heap::AllocateJSArrayWithElements(
array->set_elements(elements);
array->set_length(Smi::FromInt(elements->length()));
- array->ValidateElements();
return array;
}
@@ -4495,16 +4487,6 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
-
-#ifdef DEBUG
- if (FLAG_verify_heap) {
- // Initialize string's content to ensure ASCII-ness (character range 0-127)
- // as required when verifying the heap.
- char* dest = SeqAsciiString::cast(result)->GetChars();
- memset(dest, 0x0F, length * kCharSize);
- }
-#endif // DEBUG
-
return result;
}
@@ -4551,13 +4533,13 @@ MaybeObject* Heap::AllocateJSArray(
Context* global_context = isolate()->context()->global_context();
JSFunction* array_function = global_context->array_function();
Map* map = array_function->initial_map();
- Object* maybe_map_array = global_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ map = Map::cast(global_context->double_js_array_map());
+ } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
+ map = Map::cast(global_context->object_js_array_map());
+ } else {
+ ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+ ASSERT(map == global_context->smi_js_array_map());
}
return AllocateJSObjectFromMap(map, pretenure);
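
The reverted logic above picks the initial JSArray map from one dedicated global-context slot per elements kind, instead of indexing a js_array_maps array. A toy model of that selection (types and field names are illustrative stand-ins):

    // Toy model of the reverted per-kind initial-map selection.
    struct Map;
    struct GlobalContext {
      Map* smi_js_array_map;
      Map* double_js_array_map;
      Map* object_js_array_map;
    };
    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

    Map* InitialArrayMap(GlobalContext* cx, ElementsKind kind, bool smi_only_arrays) {
      if (kind == FAST_DOUBLE_ELEMENTS) return cx->double_js_array_map;
      if (kind == FAST_ELEMENTS || !smi_only_arrays) return cx->object_js_array_map;
      return cx->smi_js_array_map;  // kind == FAST_SMI_ONLY_ELEMENTS
    }
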
@@ -4842,7 +4824,9 @@ MaybeObject* Heap::AllocateGlobalContext() {
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(global_context_map());
- context->set_js_array_maps(undefined_value());
+ context->set_smi_js_array_map(undefined_value());
+ context->set_double_js_array_map(undefined_value());
+ context->set_object_js_array_map(undefined_value());
ASSERT(context->IsGlobalContext());
ASSERT(result->IsContext());
return result;
@@ -5826,6 +5810,16 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
+intptr_t Heap::PromotedSpaceSize() {
+ return old_pointer_space_->Size()
+ + old_data_space_->Size()
+ + code_space_->Size()
+ + map_space_->Size()
+ + cell_space_->Size()
+ + lo_space_->Size();
+}
+
+
intptr_t Heap::PromotedSpaceSizeOfObjects() {
return old_pointer_space_->SizeOfObjects()
+ old_data_space_->SizeOfObjects()
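
The reintroduced PromotedSpaceSize sums Space::Size() where PromotedSpaceSizeOfObjects sums SizeOfObjects(), so the GC limits switch from tracking live object bytes back to tracking space size. A toy model of the distinction, under the assumed semantics that Size() counts committed memory and SizeOfObjects() counts live bytes:

    // Toy model; committed-vs-live is the assumed semantics of Size()
    // versus SizeOfObjects() in the real spaces.
    struct Space {
      long committed_bytes;  // memory reserved for the space
      long live_bytes;       // bytes occupied by live objects
      long Size() const { return committed_bytes; }
      long SizeOfObjects() const { return live_bytes; }
    };

    long PromotedSpaceSize(const Space* spaces, int n) {
      long total = 0;
      for (int i = 0; i < n; ++i) total += spaces[i].Size();
      return total;
    }
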
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 55e7135609..b91416fb08 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -621,7 +621,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateMap(
InstanceType instance_type,
int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+ ElementsKind elements_kind = FAST_ELEMENTS);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -1342,7 +1342,7 @@ class Heap {
PretenureFlag pretenure);
inline intptr_t PromotedTotalSize() {
- return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ return PromotedSpaceSize() + PromotedExternalMemorySize();
}
// True if we have reached the allocation limit in the old generation that
@@ -1363,6 +1363,19 @@ class Heap {
static const intptr_t kMinimumAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+ // When we sweep lazily we initially guess that there is no garbage on the
+ // heap and set the limits for the next GC accordingly. As we sweep we find
+ // out that some of the pages contained garbage and we have to adjust
+ // downwards the size of the heap. This means the limits that control the
+ // timing of the next GC also need to be adjusted downwards.
+ void LowerOldGenLimits(intptr_t adjustment) {
+ size_of_old_gen_at_last_old_space_gc_ -= adjustment;
+ old_gen_promotion_limit_ =
+ OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+ old_gen_allocation_limit_ =
+ OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+ }
+
intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 10 : 3;
intptr_t limit =
@@ -1455,7 +1468,7 @@ class Heap {
intptr_t adjusted_allocation_limit =
old_gen_allocation_limit_ - new_space_.Capacity() / 5;
- if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
+ if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
return false;
}
@@ -1493,6 +1506,7 @@ class Heap {
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
+ intptr_t PromotedSpaceSize();
intptr_t PromotedSpaceSizeOfObjects();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
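
A worked example of the LowerOldGenLimits arithmetic added above, with made-up numbers and an assumed stand-in for the limit formulas; it only illustrates that both limits are re-derived from the reduced base.

    // Made-up numbers; Limit() is an assumed stand-in for the real
    // OldGenPromotionLimit/OldGenAllocationLimit formulas.
    const long MB = 1024 * 1024;
    long Limit(long old_gen_size) { return 3 * old_gen_size; }

    void Example() {
      long size_at_last_gc = 64 * MB;  // recorded at the last old-space GC
      long adjustment = 8 * MB;        // garbage discovered by lazy sweeping
      size_at_last_gc -= adjustment;   // now 56 MB
      long promotion_limit = Limit(size_at_last_gc);   // re-derived from 56 MB
      long allocation_limit = Limit(size_at_last_gc);  // likewise
      (void)promotion_limit; (void)allocation_limit;
    }
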
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 57a186280b..f8c021c2a8 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -1603,7 +1603,6 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
SetOperandAt(1, object);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnMaps);
- int map_transitions = 0;
for (int i = 0;
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
@@ -1625,20 +1624,13 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
case CONSTANT_FUNCTION:
types_.Add(types->at(i));
break;
- case MAP_TRANSITION:
- // We should just ignore these since they are not relevant to a load
- // operation. This means we will deopt if we actually see this map
- // from optimized code.
- map_transitions++;
- break;
default:
break;
}
}
}
- if (types_.length() + map_transitions == types->length() &&
- FLAG_deoptimize_uncommon_cases) {
+ if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
SetFlag(kUseGVN);
} else {
SetAllSideEffects();
@@ -1685,9 +1677,6 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
- if (hole_check_mode_ == PERFORM_HOLE_CHECK) {
- stream->Add(" check_hole");
- }
}
@@ -1739,7 +1728,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
key_load->key(),
- OMIT_HOLE_CHECK);
+ HLoadKeyedFastElement::OMIT_HOLE_CHECK);
HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
object(), index);
map_check->InsertBefore(this);
@@ -1787,11 +1776,8 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel");
break;
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -1888,12 +1874,9 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -1908,13 +1891,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- ElementsKind from_kind = original_map()->elements_kind();
- ElementsKind to_kind = transitioned_map()->elements_kind();
- stream->Add(" %p [%s] -> %p [%s]",
- *original_map(),
- ElementsAccessor::ForKind(from_kind)->name(),
- *transitioned_map(),
- ElementsAccessor::ForKind(to_kind)->name());
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index c68befd15a..9d262fc811 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -2083,21 +2083,28 @@ class HCheckMaps: public HTemplateInstruction<2> {
HCheckMaps* check_map = new HCheckMaps(object, map);
SmallMapList* map_set = check_map->map_set();
- // Since transitioned elements maps of the initial map don't fail the map
- // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
- check_map->ClearGVNFlag(kDependsOnElementsKind);
-
- ElementsKind kind = map->elements_kind();
- bool packed = IsFastPackedElementsKind(kind);
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- Map* transitioned_map =
- map->LookupElementsTransitionMap(kind, NULL);
- if (transitioned_map) {
- map_set->Add(Handle<Map>(transitioned_map));
- }
- };
+ // If the map to check has the untransitioned elements, it can be hoisted
+ // above TransitionElements instructions.
+ if (map->has_fast_smi_only_elements()) {
+ check_map->ClearGVNFlag(kDependsOnElementsKind);
+ }
+
+ Map* transitioned_fast_element_map =
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ map_set->Add(Handle<Map>(transitioned_fast_element_map));
+ }
+ Map* transitioned_double_map =
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ map_set->Add(Handle<Map>(transitioned_double_map));
+ }
map_set->Sort();
+
return check_map;
}
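
What the rewritten NewWithTransitions boils down to: a polymorphic map check passes when the object's current map is any member of the set, so pre-adding the FAST_ELEMENTS and FAST_DOUBLE_ELEMENTS transition targets lets the check survive those transitions. A minimal sketch of a set-based check (names illustrative):

    // Minimal sketch; the real HCheckMaps deoptimizes on failure instead of
    // returning false.
    #include <vector>
    struct Map;

    bool MapCheckPasses(const Map* actual, const std::vector<const Map*>& map_set) {
      for (size_t i = 0; i < map_set.size(); ++i) {
        if (map_set[i] == actual) return true;
      }
      return false;  // would trigger a deoptimization in generated code
    }
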
@@ -3939,28 +3946,15 @@ class HLoadFunctionPrototype: public HUnaryOperation {
virtual bool DataEquals(HValue* other) { return true; }
};
-class ArrayInstructionInterface {
- public:
- virtual HValue* GetKey() = 0;
- virtual void SetKey(HValue* key) = 0;
- virtual void SetIndexOffset(uint32_t index_offset) = 0;
- virtual bool IsDehoisted() = 0;
- virtual void SetDehoisted(bool is_dehoisted) = 0;
- virtual ~ArrayInstructionInterface() { };
-};
-
-enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
-
-class HLoadKeyedFastElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+class HLoadKeyedFastElement: public HTemplateInstruction<2> {
public:
+ enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
+
HLoadKeyedFastElement(HValue* obj,
HValue* key,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
- : hole_check_mode_(hole_check_mode),
- index_offset_(0),
- is_dehoisted_(false) {
+ : hole_check_mode_(hole_check_mode) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
set_representation(Representation::Tagged());
@@ -3970,12 +3964,6 @@ class HLoadKeyedFastElement
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@@ -3994,43 +3982,26 @@ class HLoadKeyedFastElement
virtual bool DataEquals(HValue* other) {
if (!other->IsLoadKeyedFastElement()) return false;
HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
- if (is_dehoisted_ && index_offset_ != other_load->index_offset_)
- return false;
return hole_check_mode_ == other_load->hole_check_mode_;
}
private:
HoleCheckMode hole_check_mode_;
- uint32_t index_offset_;
- bool is_dehoisted_;
};
-class HLoadKeyedFastDoubleElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
public:
- HLoadKeyedFastDoubleElement(
- HValue* elements,
- HValue* key,
- HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
- : index_offset_(0),
- is_dehoisted_(false),
- hole_check_mode_(hole_check_mode) {
- SetOperandAt(0, elements);
- SetOperandAt(1, key);
- set_representation(Representation::Double());
+ HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) {
+ SetOperandAt(0, elements);
+ SetOperandAt(1, key);
+ set_representation(Representation::Double());
SetGVNFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
- }
+ }
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@@ -4039,38 +4010,21 @@ class HLoadKeyedFastDoubleElement
: Representation::Integer32();
}
- bool RequiresHoleCheck() {
- return hole_check_mode_ == PERFORM_HOLE_CHECK;
- }
-
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyedFastDoubleElement()) return false;
- HLoadKeyedFastDoubleElement* other_load =
- HLoadKeyedFastDoubleElement::cast(other);
- return hole_check_mode_ == other_load->hole_check_mode_;
- }
-
- private:
- uint32_t index_offset_;
- bool is_dehoisted_;
- HoleCheckMode hole_check_mode_;
+ virtual bool DataEquals(HValue* other) { return true; }
};
-class HLoadKeyedSpecializedArrayElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
public:
HLoadKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
ElementsKind elements_kind)
- : elements_kind_(elements_kind),
- index_offset_(0),
- is_dehoisted_(false) {
+ : elements_kind_(elements_kind) {
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
@@ -4098,12 +4052,6 @@ class HLoadKeyedSpecializedArrayElement
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Range* InferRange(Zone* zone);
@@ -4119,8 +4067,6 @@ class HLoadKeyedSpecializedArrayElement
private:
ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
};
@@ -4242,12 +4188,11 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
};
-class HStoreKeyedFastElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+class HStoreKeyedFastElement: public HTemplateInstruction<3> {
public:
HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
ElementsKind elements_kind = FAST_ELEMENTS)
- : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
+ : elements_kind_(elements_kind) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
@@ -4265,14 +4210,8 @@ class HStoreKeyedFastElement
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
bool value_is_smi() {
- return IsFastSmiElementsKind(elements_kind_);
+ return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
}
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
if (value_is_smi()) {
@@ -4288,18 +4227,14 @@ class HStoreKeyedFastElement
private:
ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
};
-class HStoreKeyedFastDoubleElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
public:
HStoreKeyedFastDoubleElement(HValue* elements,
HValue* key,
- HValue* val)
- : index_offset_(0), is_dehoisted_(false) {
+ HValue* val) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
SetOperandAt(2, val);
@@ -4319,12 +4254,6 @@ class HStoreKeyedFastDoubleElement
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
@@ -4335,21 +4264,16 @@ class HStoreKeyedFastDoubleElement
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
-
- private:
- uint32_t index_offset_;
- bool is_dehoisted_;
};
-class HStoreKeyedSpecializedArrayElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
public:
HStoreKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
HValue* val,
ElementsKind elements_kind)
- : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
+ : elements_kind_(elements_kind) {
SetGVNFlag(kChangesSpecializedArrayElements);
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
@@ -4377,19 +4301,11 @@ class HStoreKeyedSpecializedArrayElement
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
private:
ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
};
@@ -4436,19 +4352,9 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
transitioned_map_(transitioned_map) {
SetOperandAt(0, object);
SetFlag(kUseGVN);
- // Don't set GVN DependOn flags here. That would defeat GVN's detection of
- // congruent HTransitionElementsKind instructions. Instruction hoisting
- // handles HTransitionElementsKind instruction specially, explicitly adding
- // DependsOn flags during its dependency calculations.
SetGVNFlag(kChangesElementsKind);
- if (original_map->has_fast_double_elements()) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
- if (transitioned_map->has_fast_double_elements()) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ SetGVNFlag(kChangesElementsPointer);
+ SetGVNFlag(kChangesNewSpacePromotion);
set_representation(Representation::Tagged());
}
@@ -4686,7 +4592,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
HValue* context() { return OperandAt(0); }
ElementsKind boilerplate_elements_kind() const {
if (!boilerplate_object_->IsJSObject()) {
- return TERMINAL_FAST_ELEMENTS_KIND;
+ return FAST_ELEMENTS;
}
return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index b9ad8af529..b722f9f821 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -1709,23 +1709,23 @@ void HGlobalValueNumberer::ProcessLoopBlock(
bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
if (instr->IsTransitionElementsKind()) {
// It's possible to hoist transitions out of a loop as long as the
- // hoisting wouldn't move the transition past an instruction that has a
- // DependsOn flag for anything it changes.
+ // hoisting wouldn't move the transition past a DependsOn of one of its
+ // changes, or past any instruction that might change an object's map or
+ // elements contents.
+ GVNFlagSet changes = instr->ChangesFlags();
GVNFlagSet hoist_depends_blockers =
- HValue::ConvertChangesToDependsFlags(instr->ChangesFlags());
-
- // In addition, the transition must not be hoisted above elements kind
- // changes, or if the transition is destructive to the elements buffer,
- // changes to array pointer or array contents.
- GVNFlagSet hoist_change_blockers;
- hoist_change_blockers.Add(kChangesElementsKind);
+ HValue::ConvertChangesToDependsFlags(changes);
+ // Besides instructions that depend on something the transition changes,
+ // the transition must not be hoisted above map changes or above stores
+ // to an elements backing store that the transition might change.
+ GVNFlagSet hoist_change_blockers = changes;
+ hoist_change_blockers.Add(kChangesMaps);
HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
if (trans->original_map()->has_fast_double_elements()) {
- hoist_change_blockers.Add(kChangesElementsPointer);
hoist_change_blockers.Add(kChangesDoubleArrayElements);
}
if (trans->transitioned_map()->has_fast_double_elements()) {
- hoist_change_blockers.Add(kChangesElementsPointer);
hoist_change_blockers.Add(kChangesArrayElements);
}
if (FLAG_trace_gvn) {
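
A sketch of the blocker computation in the hunk above, written over plain bit sets rather than V8's GVNFlagSet; the flag constants are placeholder masks, and the two blocker sets the real code tracks separately are collapsed into one here.

    // Placeholder bit masks, not V8's real GVNFlag encoding.
    typedef unsigned long long FlagSet;
    const FlagSet kChangesMaps = 1ull << 0;
    const FlagSet kChangesDoubleArrayElements = 1ull << 1;
    const FlagSet kChangesArrayElements = 1ull << 2;

    FlagSet ConvertChangesToDepends(FlagSet changes) { return changes << 32; }

    bool CanHoistTransition(FlagSet loop_flags, FlagSet changes,
                            bool from_double, bool to_double) {
      FlagSet blockers = ConvertChangesToDepends(changes) | changes | kChangesMaps;
      if (from_double) blockers |= kChangesDoubleArrayElements;
      if (to_double) blockers |= kChangesArrayElements;
      return (loop_flags & blockers) == 0;  // nothing in the loop blocks hoisting
    }
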
@@ -2758,7 +2758,6 @@ HGraph* HGraphBuilder::CreateGraph() {
sce.Process();
graph()->EliminateRedundantBoundsChecks();
- graph()->DehoistSimpleArrayIndexComputations();
return graph();
}
@@ -3017,6 +3016,7 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
HBoundsCheck* check = HBoundsCheck::cast(i);
check->ReplaceAllUsesWith(check->index());
+ isolate()->counters()->array_bounds_checks_seen()->Increment();
if (!FLAG_array_bounds_checks_elimination) continue;
int32_t offset;
@@ -3035,8 +3035,10 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
*data_p = bb_data_list;
} else if (data->OffsetIsCovered(offset)) {
check->DeleteAndReplaceWith(NULL);
+ isolate()->counters()->array_bounds_checks_removed()->Increment();
} else if (data->BasicBlock() == bb) {
data->CoverCheck(check, offset);
+ isolate()->counters()->array_bounds_checks_removed()->Increment();
} else {
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
@@ -3080,93 +3082,6 @@ void HGraph::EliminateRedundantBoundsChecks() {
}
-static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
- HValue* index = array_operation->GetKey();
-
- HConstant* constant;
- HValue* subexpression;
- int32_t sign;
- if (index->IsAdd()) {
- sign = 1;
- HAdd* add = HAdd::cast(index);
- if (add->left()->IsConstant()) {
- subexpression = add->right();
- constant = HConstant::cast(add->left());
- } else if (add->right()->IsConstant()) {
- subexpression = add->left();
- constant = HConstant::cast(add->right());
- } else {
- return;
- }
- } else if (index->IsSub()) {
- sign = -1;
- HSub* sub = HSub::cast(index);
- if (sub->left()->IsConstant()) {
- subexpression = sub->right();
- constant = HConstant::cast(sub->left());
- } else if (sub->right()->IsConstant()) {
- subexpression = sub->left();
- constant = HConstant::cast(sub->right());
- } else {
- return;
- }
- } else {
- return;
- }
-
- if (!constant->HasInteger32Value()) return;
- int32_t value = constant->Integer32Value() * sign;
- // We limit offset values to 30 bits because we want to avoid the risk of
- // overflows when the offset is added to the object header size.
- if (value >= 1 << 30 || value < 0) return;
- array_operation->SetKey(subexpression);
- if (index->HasNoUses()) {
- index->DeleteAndReplaceWith(NULL);
- }
- ASSERT(value >= 0);
- array_operation->SetIndexOffset(static_cast<uint32_t>(value));
- array_operation->SetDehoisted(true);
-}
-
-
-void HGraph::DehoistSimpleArrayIndexComputations() {
- if (!FLAG_array_index_dehoisting) return;
-
- HPhase phase("H_Dehoist index computations", this);
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstruction* instr = blocks()->at(i)->first();
- instr != NULL;
- instr = instr->next()) {
- ArrayInstructionInterface* array_instruction = NULL;
- if (instr->IsLoadKeyedFastElement()) {
- HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsLoadKeyedFastDoubleElement()) {
- HLoadKeyedFastDoubleElement* op =
- HLoadKeyedFastDoubleElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsLoadKeyedSpecializedArrayElement()) {
- HLoadKeyedSpecializedArrayElement* op =
- HLoadKeyedSpecializedArrayElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedFastElement()) {
- HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedFastDoubleElement()) {
- HStoreKeyedFastDoubleElement* op =
- HStoreKeyedFastDoubleElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedSpecializedArrayElement()) {
- HStoreKeyedSpecializedArrayElement* op =
- HStoreKeyedSpecializedArrayElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else {
- continue;
- }
- DehoistArrayIndex(array_instruction);
- }
- }
-}
-
-
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
current_block()->AddInstruction(instr);
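
For reference, the guard at the heart of the removed DehoistArrayIndex: a key of the form subexpression ± constant is split so the constant becomes a static index offset, but only when the signed result is non-negative and fits in 30 bits. Standalone version of that guard:

    #include <cstdint>

    // Standalone version of the removed pass's offset guard. `sign` is +1 for
    // the Add case and -1 for the Sub case, mirroring the code above.
    bool OffsetFitsForDehoisting(int32_t constant, int32_t sign, uint32_t* offset) {
      int32_t value = constant * sign;
      // Offsets are limited to 30 bits to avoid overflow once the offset is
      // added to the object header size.
      if (value >= (1 << 30) || value < 0) return false;
      *offset = static_cast<uint32_t>(value);
      return true;
    }
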
@@ -3966,7 +3881,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
new(zone()) HLoadKeyedFastElement(
environment()->ExpressionStackAt(2), // Enum cache.
environment()->ExpressionStackAt(0), // Iteration index.
- OMIT_HOLE_CHECK));
+ HLoadKeyedFastElement::OMIT_HOLE_CHECK));
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
@@ -4257,7 +4172,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
if (boilerplate->HasFastDoubleElements()) {
*total_size += FixedDoubleArray::SizeFor(elements->length());
- } else if (boilerplate->HasFastObjectElements()) {
+ } else if (boilerplate->HasFastElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@@ -4464,13 +4379,11 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Representation::Integer32()));
switch (boilerplate_elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
// Smi-only arrays need a smi check.
AddInstruction(new(zone()) HCheckSmi(value));
// Fall through.
case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
AddInstruction(new(zone()) HStoreKeyedFastElement(
elements,
key,
@@ -4478,7 +4391,6 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
boilerplate_elements_kind));
break;
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
key,
value));
@@ -5236,12 +5148,9 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
break;
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -5266,16 +5175,13 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
ASSERT(val != NULL);
switch (elements_kind) {
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
return new(zone()) HStoreKeyedFastDoubleElement(
elements, checked_key, val);
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
// Smi-only arrays need a smi check.
AddInstruction(new(zone()) HCheckSmi(val));
// Fall through.
case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
return new(zone()) HStoreKeyedFastElement(
elements, checked_key, val, elements_kind);
default:
@@ -5284,13 +5190,10 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
}
}
// It's an element load (!is_store).
- HoleCheckMode mode = IsFastPackedElementsKind(elements_kind) ?
- OMIT_HOLE_CHECK :
- PERFORM_HOLE_CHECK;
- if (IsFastDoubleElementsKind(elements_kind)) {
- return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, mode);
- } else { // Smi or Object elements.
- return new(zone()) HLoadKeyedFastElement(elements, checked_key, mode);
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+ } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+ return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
@@ -5298,30 +5201,15 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
- HValue* dependency,
Handle<Map> map,
bool is_store) {
- HInstruction* mapcheck =
- AddInstruction(new(zone()) HCheckMaps(object, map, dependency));
- // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
- // on a HElementsTransition instruction. The flag can also be removed if the
- // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
- // ElementsKind transitions. Finally, the dependency can be removed for stores
- // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
- // generated store code.
- if (dependency ||
- (map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
- (map->elements_kind() == FAST_ELEMENTS && is_store)) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
- bool fast_smi_only_elements = map->has_fast_smi_elements();
- bool fast_elements = map->has_fast_object_elements();
+ HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMaps(object, map));
+ bool fast_smi_only_elements = map->has_fast_smi_only_elements();
+ bool fast_elements = map->has_fast_elements();
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
if (is_store && (fast_elements || fast_smi_only_elements)) {
- HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
- AddInstruction(check_cow_map);
+ AddInstruction(new(zone()) HCheckMaps(
+ elements, isolate()->factory()->fixed_array_map()));
}
HInstruction* length = NULL;
HInstruction* checked_key = NULL;
@@ -5374,8 +5262,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ElementsKind elements_kind = map->elements_kind();
- if (IsFastElementsKind(elements_kind) &&
- elements_kind != GetInitialFastElementsKind()) {
+ if (elements_kind == FAST_DOUBLE_ELEMENTS ||
+ elements_kind == FAST_ELEMENTS) {
possible_transitioned_maps.Add(map);
}
}
@@ -5389,17 +5277,12 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
int num_untransitionable_maps = 0;
Handle<Map> untransitionable_map;
- HTransitionElementsKind* transition = NULL;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ASSERT(map->IsMap());
if (!transition_target.at(i).is_null()) {
- ASSERT(Map::IsValidElementsTransition(
- map->elements_kind(),
- transition_target.at(i)->elements_kind()));
- transition = new(zone()) HTransitionElementsKind(
- object, map, transition_target.at(i));
- AddInstruction(transition);
+ AddInstruction(new(zone()) HTransitionElementsKind(
+ object, map, transition_target.at(i)));
} else {
type_todo[map->elements_kind()] = true;
if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
@@ -5419,7 +5302,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
: BuildLoadKeyedGeneric(object, key));
} else {
instr = AddInstruction(BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, is_store));
+ object, key, val, untransitionable_map, is_store));
}
*has_side_effects |= instr->HasObservableSideEffects();
instr->set_position(position);
@@ -5436,18 +5319,20 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
- // Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds
- // are handled before external arrays.
- STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
+ // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
+ // arrays.
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) {
- // After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some
- // code that's executed for all external array cases.
+ // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
+ // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
+ // that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -5469,8 +5354,10 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_true);
HInstruction* access;
- if (IsFastElementsKind(elements_kind)) {
- if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_ELEMENTS ||
+ elements_kind == FAST_DOUBLE_ELEMENTS) {
+ if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
AddInstruction(new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map(),
elements_kind_branch));
@@ -5557,7 +5444,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
: BuildLoadKeyedGeneric(obj, key);
} else {
AddInstruction(new(zone()) HCheckNonSmi(obj));
- instr = BuildMonomorphicElementAccess(obj, key, val, NULL, map, is_store);
+ instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
}
} else if (expr->GetReceiverTypes() != NULL &&
!expr->GetReceiverTypes()->is_empty()) {
@@ -5775,39 +5662,6 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
}
-class FunctionSorter {
- public:
- FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
- FunctionSorter(int index, int ticks, int ast_length, int src_length)
- : index_(index),
- ticks_(ticks),
- ast_length_(ast_length),
- src_length_(src_length) { }
-
- int index() const { return index_; }
- int ticks() const { return ticks_; }
- int ast_length() const { return ast_length_; }
- int src_length() const { return src_length_; }
-
- private:
- int index_;
- int ticks_;
- int ast_length_;
- int src_length_;
-};
-
-
-static int CompareHotness(void const* a, void const* b) {
- FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a);
- FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b);
- int diff = function1->ticks() - function2->ticks();
- if (diff != 0) return -diff;
- diff = function1->ast_length() - function2->ast_length();
- if (diff != 0) return diff;
- return function1->src_length() - function2->src_length();
-}
-
-
void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
HValue* receiver,
SmallMapList* types,
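
The removed FunctionSorter/CompareHotness pair ordered polymorphic inlining candidates by hotness before the loop below; here is that comparator as a self-contained qsort callback, restated directly from the removed code:

    #include <cstdlib>

    struct FunctionSorter {
      int index, ticks, ast_length, src_length;
    };

    // Hotter functions (more profiler ticks) sort first; ties break toward
    // smaller ASTs, then shorter source -- cheapest-to-inline first.
    static int CompareHotness(const void* a, const void* b) {
      const FunctionSorter* f1 = static_cast<const FunctionSorter*>(a);
      const FunctionSorter* f2 = static_cast<const FunctionSorter*>(b);
      int diff = f1->ticks - f2->ticks;
      if (diff != 0) return -diff;  // descending by ticks
      diff = f1->ast_length - f2->ast_length;
      if (diff != 0) return diff;   // ascending by AST size
      return f1->src_length - f2->src_length;
    }
    // Usage: qsort(order, ordered_functions, sizeof(order[0]), &CompareHotness);
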
@@ -5816,73 +5670,51 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
+ int count = 0;
HBasicBlock* join = NULL;
- FunctionSorter order[kMaxCallPolymorphism];
- int ordered_functions = 0;
- for (int i = 0;
- i < types->length() && ordered_functions < kMaxCallPolymorphism;
- ++i) {
+ for (int i = 0; i < types->length() && count < kMaxCallPolymorphism; ++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
- order[ordered_functions++] =
- FunctionSorter(i,
- expr->target()->shared()->profiler_ticks(),
- InliningAstSize(expr->target()),
- expr->target()->shared()->SourceSize());
- }
- }
-
- qsort(reinterpret_cast<void*>(&order[0]),
- ordered_functions,
- sizeof(order[0]),
- &CompareHotness);
+ if (count == 0) {
+ // Only needed once.
+ AddInstruction(new(zone()) HCheckNonSmi(receiver));
+ join = graph()->CreateBasicBlock();
+ }
+ ++count;
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HCompareMap* compare =
+ new(zone()) HCompareMap(receiver, map, if_true, if_false);
+ current_block()->Finish(compare);
- for (int fn = 0; fn < ordered_functions; ++fn) {
- int i = order[fn].index();
- Handle<Map> map = types->at(i);
- if (fn == 0) {
- // Only needed once.
- AddInstruction(new(zone()) HCheckNonSmi(receiver));
- join = graph()->CreateBasicBlock();
- }
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(receiver, map, if_true, if_false);
- current_block()->Finish(compare);
+ set_current_block(if_true);
+ AddCheckConstantFunction(expr, receiver, map, false);
+ if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
+ PrintF("Trying to inline the polymorphic call to %s\n",
+ *name->ToCString());
+ }
+ if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
+ // Trying to inline will signal that we should bailout from the
+ // entire compilation by setting stack overflow on the visitor.
+ if (HasStackOverflow()) return;
+ } else {
+ HCallConstantFunction* call =
+ new(zone()) HCallConstantFunction(expr->target(), argument_count);
+ call->set_position(expr->position());
+ PreProcessCall(call);
+ AddInstruction(call);
+ if (!ast_context()->IsEffect()) Push(call);
+ }
- set_current_block(if_true);
- expr->ComputeTarget(map, name);
- AddCheckConstantFunction(expr, receiver, map, false);
- if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
- Handle<JSFunction> caller = info()->closure();
- SmartArrayPointer<char> caller_name =
- caller->shared()->DebugName()->ToCString();
- PrintF("Trying to inline the polymorphic call to %s from %s\n",
- *name->ToCString(),
- *caller_name);
- }
- if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
- // Trying to inline will signal that we should bailout from the
- // entire compilation by setting stack overflow on the visitor.
- if (HasStackOverflow()) return;
- } else {
- HCallConstantFunction* call =
- new(zone()) HCallConstantFunction(expr->target(), argument_count);
- call->set_position(expr->position());
- PreProcessCall(call);
- AddInstruction(call);
- if (!ast_context()->IsEffect()) Push(call);
+ if (current_block() != NULL) current_block()->Goto(join);
+ set_current_block(if_false);
}
-
- if (current_block() != NULL) current_block()->Goto(join);
- set_current_block(if_false);
}
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
- if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
+ if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
} else {
HValue* context = environment()->LookupContext();
@@ -5931,11 +5763,14 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
}
-static const int kNotInlinable = 1000000000;
-
-
-int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
- if (!FLAG_use_inlining) return kNotInlinable;
+bool HGraphBuilder::TryInline(CallKind call_kind,
+ Handle<JSFunction> target,
+ ZoneList<Expression*>* arguments,
+ HValue* receiver,
+ int ast_id,
+ int return_id,
+ ReturnHandlingFlag return_handling) {
+ if (!FLAG_use_inlining) return false;
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
@@ -5947,43 +5782,25 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (target_shared->SourceSize() >
Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
TraceInline(target, caller, "target text too big");
- return kNotInlinable;
+ return false;
}
// Target must be inlineable.
if (!target->IsInlineable()) {
TraceInline(target, caller, "target not inlineable");
- return kNotInlinable;
+ return false;
}
if (target_shared->dont_inline() || target_shared->dont_optimize()) {
TraceInline(target, caller, "target contains unsupported syntax [early]");
- return kNotInlinable;
+ return false;
}
int nodes_added = target_shared->ast_node_count();
- return nodes_added;
-}
-
-
-bool HGraphBuilder::TryInline(CallKind call_kind,
- Handle<JSFunction> target,
- ZoneList<Expression*>* arguments,
- HValue* receiver,
- int ast_id,
- int return_id,
- ReturnHandlingFlag return_handling) {
- int nodes_added = InliningAstSize(target);
- if (nodes_added == kNotInlinable) return false;
-
- Handle<JSFunction> caller = info()->closure();
-
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
return false;
}
- Handle<SharedFunctionInfo> target_shared(target->shared());
-
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
CompilationInfo* outer_info = info();
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 5c8ddbf6d2..a52bf3baad 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -267,7 +267,6 @@ class HGraph: public ZoneObject {
void AssignDominators();
void ReplaceCheckedValues();
void EliminateRedundantBoundsChecks();
- void DehoistSimpleArrayIndexComputations();
void PropagateDeoptimizingMark();
// Returns false if there are phi-uses of the arguments-object
@@ -1011,7 +1010,6 @@ class HGraphBuilder: public AstVisitor {
// Try to optimize fun.apply(receiver, arguments) pattern.
bool TryCallApply(Call* expr);
- int InliningAstSize(Handle<JSFunction> target);
bool TryInline(CallKind call_kind,
Handle<JSFunction> target,
ZoneList<Expression*>* arguments,
@@ -1093,7 +1091,6 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
- HValue* dependency,
Handle<Map> map,
bool is_store);
HValue* HandlePolymorphicElementAccess(HValue* object,
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 4ead80b0ec..929b485ebf 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -640,9 +640,6 @@ class Assembler : public AssemblerBase {
static const byte kJccShortPrefix = 0x70;
static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
static const byte kJcShortOpcode = kJccShortPrefix | carry;
- static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
- static const byte kJzShortOpcode = kJccShortPrefix | zero;
-
// ---------------------------------------------------------------------------
// Code generation
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index be46ff216f..a36763db20 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -900,7 +900,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -1003,8 +1003,7 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
+ __ LoadInitialArrayMap(array_function, scratch, elements_array);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
@@ -1275,11 +1274,11 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(&prepare_generic_code_call);
__ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
__ mov(ebx, Operand(esp, 0));
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS,
+ FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
edi,
eax,
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index df04b289b4..a1c6edd0fa 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -3822,24 +3822,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
+ static const int kRegExpExecuteArguments = 8;
__ EnterApiExitFrame(kRegExpExecuteArguments);
- // Argument 9: Pass current isolate address.
- __ mov(Operand(esp, 8 * kPointerSize),
+ // Argument 8: Pass current isolate address.
+ __ mov(Operand(esp, 7 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
- // Argument 7: Start (high end) of backtracking stack memory area.
+ // Argument 6: Start (high end) of backtracking stack memory area.
__ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 6 * kPointerSize), esi);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
+ __ mov(Operand(esp, 5 * kPointerSize), esi);
// Argument 5: static offsets vector buffer.
__ mov(Operand(esp, 4 * kPointerSize),
@@ -3902,9 +3898,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ cmp(eax, 1);
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
+ __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
__ j(equal, &success);
Label failure;
__ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
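
The stack-slot layout implied by the reverted call sequence above, read directly off the hunk (Operand(esp, n * kPointerSize) holds argument n + 1); nothing here goes beyond what the diff shows.

    // Stack slots for the 8-argument native regexp call, per the hunk above:
    //   esp + 7 * kPointerSize : argument 8 -- current isolate address
    //   esp + 6 * kPointerSize : argument 7 -- direct-call-from-JS flag (1)
    //   esp + 5 * kPointerSize : argument 6 -- backtrack stack high end
    //   esp + 4 * kPointerSize : argument 5 -- static offsets vector buffer
    // Arguments 1-4 are set up by code outside this hunk.
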
@@ -7063,8 +7057,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
{ REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
{ REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
@@ -7336,9 +7330,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ CheckFastElements(edi, &double_elements);
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(eax, &smi_element);
- __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
+ __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -7360,7 +7354,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ pop(edx);
__ jmp(&slow_elements);
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
@@ -7373,15 +7367,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
OMIT_SMI_CHECK);
__ ret(0);
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
FixedArrayBase::kHeaderSize), eax);
__ ret(0);
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ push(edx);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index eb6868729b..cff6454ff1 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -351,7 +351,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@@ -372,7 +372,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index d153e18ee9..710cbaf19b 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -91,12 +91,10 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
-// All debug break stubs support padding for LiveEdit.
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
#define __ ACCESS_MASM(masm)
+
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs,
@@ -105,13 +103,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ push(Immediate(Smi::FromInt(
- Debug::FramePaddingLayout::kPaddingValue)));
- }
- __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
-
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non-object values
// are stored as smis, causing them to be untouched by GC.
@@ -143,10 +134,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
CEntryStub ceb(1);
__ CallStub(&ceb);
- // Automatically find register that could be used after register restore.
- // We need one register for padding skip instructions.
- Register unused_reg = { -1 };
-
// Restore the register values containing object pointers from the
// expression stack.
for (int i = kNumJSCallerSaved; --i >= 0;) {
@@ -155,29 +142,15 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if (FLAG_debug_code) {
__ Set(reg, Immediate(kDebugZapValue));
}
- bool taken = reg.code() == esi.code();
if ((object_regs & (1 << r)) != 0) {
__ pop(reg);
- taken = true;
}
if ((non_object_regs & (1 << r)) != 0) {
__ pop(reg);
__ SmiUntag(reg);
- taken = true;
- }
- if (!taken) {
- unused_reg = reg;
}
}
- ASSERT(unused_reg.code() != -1);
-
- // Read current padding counter and skip corresponding number of words.
- __ pop(unused_reg);
- // We divide stored value by 2 (untagging) and multiply it by word's size.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
-
// Get rid of the internal frame.
}
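
The deleted debug-ia32.cc code implemented LiveEdit frame padding: a fixed number of tagged padding words plus a counter pushed on entry, and an esp adjustment driven by the popped counter on exit. A rough model of that dropped protocol, using a plain vector as the stack (kInitialSize and kPaddingValue are illustrative values, not the real constants):

    #include <vector>
    #include <cassert>

    // Model of the removed padding scheme: kInitialSize padding words are
    // pushed, then a counter, so LiveEdit can grow the frame in place.
    constexpr int kInitialSize = 10;
    constexpr int kPaddingValue = -1;

    void PushPadding(std::vector<int>& stack) {
      for (int i = 0; i < kInitialSize; i++) stack.push_back(kPaddingValue);
      stack.push_back(kInitialSize);  // counter on top, like the pushed smi
    }

    void SkipPadding(std::vector<int>& stack) {
      int count = stack.back();            // read the counter ...
      stack.pop_back();
      stack.resize(stack.size() - count);  // ... and skip that many words
    }

    int main() {
      std::vector<int> stack = {42};
      PushPadding(stack);
      SkipPadding(stack);
      assert(stack.size() == 1 && stack[0] == 42);
    }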
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 9727ea01ec..266afce204 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -1649,8 +1649,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(constant_elements_kind);
+ bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1661,7 +1660,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
@@ -1673,9 +1672,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1703,9 +1703,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
+ if (constant_elements_kind == FAST_ELEMENTS) {
+ // Fast-case array literal with ElementsKind of FAST_ELEMENTS; they cannot
+ // transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
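
VisitArrayLiteral now picks a FastCloneShallowArrayStub mode from the boilerplate's elements kind and copy-on-write status (long literals fall back to the runtime, as the hunk above shows). A hedged sketch of just the mode choice:

    #include <cassert>

    // Sketch of the stub-mode selection after the revert: copy-on-write
    // FAST_ELEMENTS boilerplates get the COW stub, other fast literals clone
    // either eagerly or generically depending on the kind.
    enum Mode { COPY_ON_WRITE_ELEMENTS, CLONE_ELEMENTS, CLONE_ANY_ELEMENTS };

    Mode ChooseCloneMode(bool is_fast_elements, bool is_cow) {
      if (is_fast_elements && is_cow) return COPY_ON_WRITE_ELEMENTS;
      return is_fast_elements ? CLONE_ELEMENTS : CLONE_ANY_ELEMENTS;
    }

    int main() {
      assert(ChooseCloneMode(true, true) == COPY_ON_WRITE_ELEMENTS);
      assert(ChooseCloneMode(false, false) == CLONE_ANY_ELEMENTS);
    }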
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index a091ff1aa6..a591af125e 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -889,25 +889,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
&non_double_value,
DONT_DO_SMI_CHECK);
- // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
- // and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
ebx,
edi,
&slow);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
&slow);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1622,7 +1622,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ mov(eax, edx);
__ Ret();
__ bind(&fail);
@@ -1727,12 +1727,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(address());
}
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1753,18 +1753,14 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
address, test_instruction_address, delta);
}
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
- // reverse operation of that.
+ // Patch with a short conditional jump. There must be a
+ // short jump-if-carry/not-carry at this position.
Address jmp_address = test_instruction_address - delta;
- ASSERT((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode);
+ Condition cc = *jmp_address == Assembler::kJncShortOpcode
+ ? not_zero
+ : zero;
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
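
The patched jump byte is pure opcode arithmetic: ia32 short conditional jumps encode as 0x70 | condition, so flipping a carry-based placeholder to a zero-based check is a one-byte rewrite. A self-contained sketch with the relevant condition codes:

    #include <cassert>
    #include <cstdint>

    // ia32 short-Jcc encoding: opcode byte is 0x70 | condition code.
    constexpr uint8_t kJccShortPrefix = 0x70;
    constexpr uint8_t kCarry = 0x2, kNotCarry = 0x3, kZero = 0x4, kNotZero = 0x5;

    // Mirrors the patched logic above: a jnc placeholder becomes jnz, jc becomes jz.
    uint8_t PatchInlinedSmiJump(uint8_t opcode) {
      assert(opcode == (kJccShortPrefix | kCarry) ||
             opcode == (kJccShortPrefix | kNotCarry));
      uint8_t cc = (opcode == (kJccShortPrefix | kNotCarry)) ? kNotZero : kZero;
      return kJccShortPrefix | cc;
    }

    int main() {
      assert(PatchInlinedSmiJump(0x73) == 0x75);  // jnc -> jnz
      assert(PatchInlinedSmiJump(0x72) == 0x74);  // jc  -> jz
    }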
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 18f2a39a83..d416662a1e 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -2274,35 +2274,40 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register result = ToRegister(instr->result());
int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
Handle<String> name = instr->hydrogen()->name();
- Label done;
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- } else {
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ mov(ecx, name);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
Label next;
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
__ j(not_equal, &next, Label::kNear);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ jmp(&done, Label::kNear);
__ bind(&next);
}
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ j(not_equal, &generic, Label::kNear);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ jmp(&done, Label::kNear);
+ __ bind(&generic);
+ __ mov(ecx, name);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
}
- if (need_generic) {
- __ mov(ecx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
}
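
Restructured this way, the polymorphic load compares the receiver's map against all but the last candidate with fall-through, and only the last map decides between the generic LoadIC and a deopt. A stand-in model of that control flow:

    #include <cassert>
    #include <vector>

    // Shape of the restructured DoLoadNamedFieldPolymorphic; types are
    // stand-ins for the real Handle<Map> machinery.
    struct Map { int id; };

    enum Outcome { LOADED, GENERIC_IC, DEOPT };

    Outcome LoadPolymorphic(const std::vector<Map>& maps, bool need_generic,
                            int object_map) {
      if (maps.empty()) return GENERIC_IC;          // the map_count == 0 case
      for (size_t i = 0; i + 1 < maps.size(); ++i)  // all but the last map
        if (maps[i].id == object_map) return LOADED;
      if (maps.back().id == object_map) return LOADED;
      return need_generic ? GENERIC_IC : DEOPT;     // last map decides fallback
    }

    int main() {
      std::vector<Map> maps = {{1}, {2}};
      assert(LoadPolymorphic(maps, false, 2) == LOADED);
      assert(LoadPolymorphic(maps, false, 9) == DEOPT);
      assert(LoadPolymorphic(maps, true, 9) == GENERIC_IC);
    }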
@@ -2377,10 +2382,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
__ and_(temp, Map::kElementsKindMask);
__ shr(temp, Map::kElementsKindShift);
- __ cmp(temp, GetInitialFastElementsKind());
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
+ __ cmp(temp, FAST_ELEMENTS);
+ __ j(equal, &ok, Label::kNear);
__ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
__ j(less, &fail, Label::kNear);
__ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
@@ -2423,11 +2426,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ mov(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
+ BuildFastArrayOperand(instr->elements(), instr->key(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
+ FixedArray::kHeaderSize - kHeapObjectTag));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2441,24 +2442,18 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ movdbl(result, double_load_operand);
}
@@ -2467,8 +2462,7 @@ Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
+ uint32_t offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
@@ -2477,14 +2471,10 @@ Operand LCodeGen::BuildFastArrayOperand(
Abort("array index constant value too big");
}
return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
+ constant_value * (1 << shift_size) + offset);
} else {
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset + (additional_index << shift_size));
+ return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset);
}
}
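
BuildFastArrayOperand reverts to plain two-term addressing: constant keys fold into the displacement as key << shift_size plus the header offset, while register keys use SIB scaling with the offset alone. The displacement arithmetic, checked in isolation (the header size here is illustrative):

    #include <cassert>
    #include <cstdint>

    // Effective-address math the reverted BuildFastArrayOperand performs for
    // constant keys: the whole index term folds into the displacement.
    constexpr uint32_t kHeaderSize = 8;  // hypothetical FixedArray header size

    uint32_t ConstantKeyDisplacement(int key, int shift_size, uint32_t offset) {
      return static_cast<uint32_t>(key << shift_size) + offset;
    }

    int main() {
      // Element 3 of a pointer-sized (4-byte) array: 3 * 4 + 8 = 20 bytes.
      assert(ConstantKeyDisplacement(3, 2, kHeaderSize) == 20);
    }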
@@ -2493,10 +2483,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
+ instr->key(), elements_kind, 0));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
@@ -2532,12 +2519,9 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -2941,13 +2925,11 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
} else {
- Label negative_sign;
Label done;
- // Deoptimize on unordered.
+ // Deoptimize on negative numbers.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
- __ j(below, &negative_sign, Label::kNear);
+ DeoptimizeIf(below, instr->environment());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Check for negative zero.
@@ -2963,21 +2945,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, Operand(input_reg));
+
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
- __ jmp(&done, Label::kNear);
-
- // Non-zero negative reaches here
- __ bind(&negative_sign);
- // Truncate, then compare and compensate
- __ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &done, Label::kNear);
- __ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
-
__ bind(&done);
}
}
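
The deleted negative-sign path computed floor by truncating toward zero and compensating when truncation changed the value; after the revert, any negative input simply deoptimizes. A sketch of the removed compensation logic:

    #include <cassert>

    // The removed path: cvttsd2si truncates toward zero, so for negative
    // non-integers the result must be decremented by one to get floor.
    int FloorViaTruncate(double x) {
      int t = static_cast<int>(x);                 // truncate toward zero
      if (static_cast<double>(t) != x && x < 0) t -= 1;  // compensate
      return t;
    }

    int main() {
      assert(FloorViaTruncate(2.7) == 2);
      assert(FloorViaTruncate(-2.7) == -3);
      assert(FloorViaTruncate(-2.0) == -2);
    }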
@@ -3436,10 +3407,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
+ instr->key(), elements_kind, 0));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
@@ -3463,12 +3431,9 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3483,21 +3448,31 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Operand operand = BuildFastArrayOperand(
- instr->object(),
- instr->key(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ mov(operand, value);
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset =
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+ __ mov(FieldOperand(elements, offset), value);
+ } else {
+ __ mov(FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ value);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(!instr->key()->IsConstantOperand());
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ lea(key, operand);
+ __ lea(key,
+ FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
__ RecordWrite(elements,
key,
value,
@@ -3525,11 +3500,8 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ movdbl(double_store_operand, value);
}
@@ -3560,23 +3532,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
__ mov(new_map_reg, to_map);
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register object_reg = ToRegister(instr->object());
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp_reg(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp_reg()), kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
@@ -4436,9 +4407,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
@@ -4600,9 +4570,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
// Deopt if the literal boilerplate ElementsKind is of a type different than
// the expected one. The check isn't necessary if the boilerplate has already
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 392bca2af0..a2810f05c3 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -242,8 +242,7 @@ class LCodeGen BASE_EMBEDDED {
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
+ uint32_t offset);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index fbc75d0e32..5adaf431bd 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -1990,7 +1990,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
+ key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
@@ -2092,9 +2093,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index be64b2fcbe..09f0b0d729 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -1238,13 +1238,13 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ LLoadKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
@@ -1255,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@@ -1275,7 +1275,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1776,7 +1775,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1799,7 +1797,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -1825,7 +1822,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 1b614867cd..60e38a6c13 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -382,12 +382,10 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
+ Map::kMaximumBitField2FastElementValue);
j(above, fail, distance);
}
@@ -395,26 +393,23 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
+ Map::kMaximumBitField2FastSmiOnlyElementValue);
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
+ Map::kMaximumBitField2FastElementValue);
j(above, fail, distance);
}
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
+ Map::kMaximumBitField2FastSmiOnlyElementValue);
j(above, fail, distance);
}
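
All three CheckFast*Elements helpers reduce to single unsigned byte compares because the elements kind occupies the high bits of the map's bit field 2, so each kind's maximum encodable byte bounds a contiguous range. An illustrative model (the shift and low-bit layout are assumptions, not the real bit assignments):

    #include <cassert>
    #include <cstdint>

    // With FAST_SMI_ONLY_ELEMENTS == 0 and FAST_ELEMENTS == 1 packed above
    // some low flag bits, one unsigned compare classifies a whole range.
    constexpr int kElementsKindShift = 3;  // hypothetical bit position
    constexpr uint8_t kMaxFastSmiOnly = (0 << kElementsKindShift) | 0x07;
    constexpr uint8_t kMaxFastElement = (1 << kElementsKindShift) | 0x07;

    bool IsFastElements(uint8_t bit_field2) {
      return bit_field2 <= kMaxFastElement;
    }
    bool IsFastObjectOnly(uint8_t bit_field2) {
      return bit_field2 > kMaxFastSmiOnly && bit_field2 <= kMaxFastElement;
    }

    int main() {
      uint8_t smi_map = 0 << kElementsKindShift;
      uint8_t obj_map = 1 << kElementsKindShift;
      assert(IsFastElements(smi_map) && IsFastElements(obj_map));
      assert(!IsFastObjectOnly(smi_map) && IsFastObjectOnly(obj_map));
    }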
@@ -498,18 +493,24 @@ void MacroAssembler::CompareMap(Register obj,
CompareMapMode mode) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_fast_element_map));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_double_map));
}
}
}
@@ -2160,38 +2161,27 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
- mov(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, FieldOperand(scratch, offset));
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- mov(map_in_out, FieldOperand(scratch, offset));
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
+ Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
mov(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
map_out,
scratch,
&done);
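
After the revert, each elements kind has its own map slot in the global context, resolved through GetContextMapIndexFromElementsKind, instead of indexing into a single JS_ARRAY_MAPS array. A toy version of that lookup (the slot numbers are made up):

    #include <cassert>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

    // Hypothetical context slot numbers; the real indices live in contexts.h.
    int GetContextMapIndexFromElementsKind(ElementsKind kind) {
      switch (kind) {
        case FAST_SMI_ONLY_ELEMENTS: return 5;
        case FAST_ELEMENTS:          return 6;
        case FAST_DOUBLE_ELEMENTS:   return 7;
      }
      return -1;
    }

    int main() {
      // The stub compares against the slot for the expected kind, then loads
      // the slot for the transitioned kind: exactly one lookup each.
      assert(GetContextMapIndexFromElementsKind(FAST_ELEMENTS) == 6);
    }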
@@ -2576,7 +2566,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index c71cad897f..66d1ce7d38 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -235,8 +235,7 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out,
- bool can_have_holes);
+ Register map_out);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -358,9 +357,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
+ void CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index cba1660789..0029f33b1a 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -42,30 +42,28 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
- * - edx : Current character. Must be loaded using LoadCurrentCharacter
- * before using any of the dispatch methods. Temporarily stores the
- * index of capture start after a matching pass for a global regexp.
- * - edi : Current position in input, as negative offset from end of string.
+ * - edx : current character. Must be loaded using LoadCurrentCharacter
+ * before using any of the dispatch methods.
+ * - edi : current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - esi : end of input (points to byte after last character in input).
- * - ebp : Frame pointer. Used to access arguments, local variables and
+ * - ebp : frame pointer. Used to access arguments, local variables and
* RegExp registers.
- * - esp : Points to tip of C stack.
- * - ecx : Points to tip of backtrack stack
+ * - esp : points to tip of C stack.
+ * - ecx : points to tip of backtrack stack
*
* The registers eax and ebx are free to use for computations.
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
- * - Isolate* isolate (address of the current isolate)
+ * - Isolate* isolate (Address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
- * - stack_area_base (high end of the memory area to use as
+ * - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (address of end of string)
- * - start of input (address of first character in string)
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
* - start index (character index of start)
* - String* input_string (location of a handle containing the string)
* --- frame alignment (if applicable) ---
@@ -74,10 +72,9 @@ namespace internal {
* - backup of caller esi
* - backup of caller edi
* - backup of caller ebx
- * - success counter (only for global regexps to count matches).
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
- * - register 0 ebp[-4] (only positions must be stored in the first
+ * - register 0 ebp[-4] (Only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
*
@@ -709,16 +706,13 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
- STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
- if (!global()) {
- __ Set(eax, Immediate(FAILURE));
- }
+ ASSERT(FAILURE == 0); // Return value for failure is zero.
+ __ Set(eax, Immediate(0));
__ jmp(&exit_label_);
}
Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
- Label return_eax;
// Finalize code - write the entry point code now we know how many
// registers we need.
@@ -737,7 +731,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(esi);
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
@@ -757,13 +750,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
+ __ jmp(&exit_label_);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(ebx);
__ or_(eax, eax);
// If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_eax);
+ __ j(not_zero, &exit_label_);
__ bind(&stack_ok);
// Load start index for later use.
@@ -790,8 +783,19 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
-#ifdef WIN32
- // Ensure that we write to each stack page, in order. Skipping a page
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1
+ // Fill in stack push order, to avoid accessing across an unwritten
+ // page (a problem on Windows).
+ __ mov(ecx, kRegisterZero);
+ Label init_loop;
+ __ bind(&init_loop);
+ __ mov(Operand(ebp, ecx, times_1, +0), eax);
+ __ sub(ecx, Immediate(kPointerSize));
+ __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
+ __ j(greater, &init_loop);
+ }
+ // Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
const int kRegistersPerPage = kPageSize / kPointerSize;
@@ -800,45 +804,20 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ mov(register_location(i), eax); // One write every page.
}
-#endif // WIN32
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ mov(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- if (num_saved_registers_ > 8) {
- __ mov(ecx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ mov(Operand(ebp, ecx, times_1, 0), eax);
- __ sub(ecx, Immediate(kPointerSize));
- __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
- __ j(greater, &init_loop);
- } else { // Unroll the loop.
- for (int i = 0; i < num_saved_registers_; i++) {
- __ mov(register_location(i), eax);
- }
- }
- }
// Initialize backtrack stack pointer.
__ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-
+ // Load previous char as initial value of current-character.
+ Label at_start;
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ __ j(equal, &at_start);
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
+ __ bind(&at_start);
+ __ mov(current_character(), '\n');
+ __ jmp(&start_label_);
+
// Exit code:
if (success_label_.is_linked()) {
@@ -857,10 +836,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(eax, register_location(i));
- if (i == 0 && global()) {
- // Keep capture start in edx for the zero-length check later.
- __ mov(edx, eax);
- }
// Convert to index from start of string, not end.
__ add(eax, ecx);
if (mode_ == UC16) {
@@ -869,54 +844,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(Operand(ebx, i * kPointerSize), eax);
}
}
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- // Increment success counter.
- __ inc(Operand(ebp, kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ mov(ecx, Operand(ebp, kNumOutputRegisters));
- __ sub(ecx, Immediate(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(ecx, Immediate(num_saved_registers_));
- __ j(less, &exit_label_);
-
- __ mov(Operand(ebp, kNumOutputRegisters), ecx);
- // Advance the location for output.
- __ add(Operand(ebp, kRegisterOutput),
- Immediate(num_saved_registers_ * kPointerSize));
-
- // Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
-
- // Special case for zero-length matches.
- // edx: capture start index
- __ cmp(edi, edx);
- // Not a zero-length match, restart.
- __ j(not_equal, &load_char_start_regexp);
- // edi (offset from the end) is zero if we already reached the end.
- __ test(edi, edi);
- __ j(zero, &exit_label_, Label::kNear);
- // Advance current position after a zero-length match.
- if (mode_ == UC16) {
- __ add(edi, Immediate(2));
- } else {
- __ inc(edi);
- }
- __ jmp(&load_char_start_regexp);
- } else {
- __ mov(eax, Immediate(SUCCESS));
- }
+ __ mov(eax, Immediate(SUCCESS));
}
-
+ // Exit and return eax
__ bind(&exit_label_);
- if (global()) {
- // Return the number of successful captures.
- __ mov(eax, Operand(ebp, kSuccessfulCaptures));
- }
-
- __ bind(&return_eax);
// Skip esp past regexp registers.
__ lea(esp, Operand(ebp, kBackup_ebx));
// Restore callee-save registers.
@@ -946,7 +877,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ or_(eax, eax);
// If returning non-zero, we should end execution with the given
// result as return value.
- __ j(not_zero, &return_eax);
+ __ j(not_zero, &exit_label_);
__ pop(edi);
__ pop(backtrack_stackpointer());
@@ -993,7 +924,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
+ __ jmp(&exit_label_);
}
CodeDesc code_desc;
@@ -1112,9 +1043,8 @@ void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
}
-bool RegExpMacroAssemblerIA32::Succeed() {
+void RegExpMacroAssemblerIA32::Succeed() {
__ jmp(&success_label_);
- return global();
}
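
The regexp changes in this file all undo the global-matching loop: pre-revert code counted successes in kSuccessfulCaptures, advanced past each match (by one character for zero-length matches), and restarted at load_char_start_regexp until output space ran out. A rough C++ model of that removed driver loop:

    #include <vector>

    // Model of the removed "global regexp" driver. The real generated code
    // also stops when the remaining output registers cannot hold another
    // capture set; that check is elided here.
    struct Match { int begin, end; };

    int MatchAll(const char* s, int len, Match (*once)(const char*, int, int),
                 std::vector<Match>* out) {
      int pos = 0, successes = 0;
      while (pos <= len) {
        Match m = once(s, len, pos);
        if (m.begin < 0) break;          // no further match: stop
        out->push_back(m);
        ++successes;
        pos = (m.end == m.begin) ? m.end + 1 : m.end;  // zero-length: advance one
      }
      return successes;                  // pre-revert code returned this count
    }

    Match FindA(const char* s, int len, int from) {
      for (int i = from; i < len; i++) if (s[i] == 'a') return {i, i + 1};
      return {-1, -1};
    }

    int main() {
      std::vector<Match> out;
      return MatchAll("banana", 6, FindA, &out) == 3 ? 0 : 1;
    }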
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index f631ffcccb..78cd06958c 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -111,7 +111,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
+ virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@@ -135,11 +135,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer - local stack variables.
@@ -148,8 +144,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_esi = kFramePointer - kPointerSize;
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
- static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
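
The frame layout is just chained pointer-size offsets below ebp; with kSuccessfulCaptures gone, kInputStartMinusOne moves up one slot. The arithmetic, restated as compile-time constants:

    #include <cassert>

    // Post-revert ia32 regexp frame: locals below ebp at pointer-size steps.
    constexpr int kPointerSize = 4;
    constexpr int kFramePointer = 0;
    constexpr int kBackup_esi = kFramePointer - kPointerSize;
    constexpr int kBackup_edi = kBackup_esi - kPointerSize;
    constexpr int kBackup_ebx = kBackup_edi - kPointerSize;
    constexpr int kInputStartMinusOne = kBackup_ebx - kPointerSize;
    constexpr int kRegisterZero = kInputStartMinusOne - kPointerSize;

    int main() {
      assert(kRegisterZero == -20);  // register 0 lives at ebp - 20
    }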
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index 478d4ce5cb..13ddf35cae 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,12 +40,12 @@ namespace internal {
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
+ const byte*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 71740ac5c7..e148e2f525 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1462,31 +1462,16 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiElements(ebx, &call_builtin);
+ __ CheckFastSmiOnlyElements(ebx, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- ebx,
- edi,
&call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ bind(&fast_object);
@@ -3833,7 +3818,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(eax, &transition_elements_kind);
}
@@ -3858,7 +3843,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ j(not_equal, &miss_force_generic);
__ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
__ mov(FieldOperand(edi,
@@ -3866,7 +3851,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
times_half_pointer_size,
FixedArray::kHeaderSize), eax);
} else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
+ ASSERT(elements_kind == FAST_ELEMENTS);
// Do the store and update the write barrier.
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 134ef8b843..643fa88413 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -352,9 +352,9 @@ void IC::Clear(Address address) {
return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
- case Code::COMPARE_IC: return CompareIC::Clear(address, target);
case Code::UNARY_OP_IC:
case Code::BINARY_OP_IC:
+ case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC:
// Clearing these is tricky and does not
// make any performance difference.
@@ -365,8 +365,9 @@ void IC::Clear(Address address) {
void CallICBase::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
+ State state = target->ic_state();
+ if (state == UNINITIALIZED) return;
Code* code =
Isolate::Current()->stub_cache()->FindCallInitialize(
target->arguments_count(),
@@ -409,17 +410,6 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
}
-void CompareIC::Clear(Address address, Code* target) {
- // Only clear ICCompareStubs, we currently cannot clear generic CompareStubs.
- if (target->major_key() != CodeStub::CompareIC) return;
- // Only clear CompareICs that can retain objects.
- if (target->compare_state() != KNOWN_OBJECTS) return;
- Token::Value op = CompareIC::ComputeOperation(target);
- SetTargetAtAddress(address, GetRawUninitialized(op));
- PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
-}
-
-
static bool HasInterceptorGetter(JSObject* object) {
return !object->GetNamedInterceptor()->getter()->IsUndefined();
}
@@ -1644,7 +1634,8 @@ Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
return string_stub();
} else {
ASSERT(receiver_map->has_dictionary_elements() ||
- receiver_map->has_fast_smi_or_object_elements() ||
+ receiver_map->has_fast_elements() ||
+ receiver_map->has_fast_smi_only_elements() ||
receiver_map->has_fast_double_elements() ||
receiver_map->has_external_array_elements());
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@@ -1659,7 +1650,8 @@ Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> generic_stub) {
- if (receiver->HasFastSmiOrObjectElements() ||
+ if (receiver->HasFastElements() ||
+ receiver->HasFastSmiOnlyElements() ||
receiver->HasExternalArrayElements() ||
receiver->HasFastDoubleElements() ||
receiver->HasDictionaryElements()) {
@@ -1679,26 +1671,15 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
+ break;
case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- return JSObject::GetElementsTransitionMap(receiver,
- FAST_HOLEY_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- return JSObject::GetElementsTransitionMap(receiver,
- FAST_HOLEY_DOUBLE_ELEMENTS);
- case KeyedIC::LOAD:
- case KeyedIC::STORE_NO_TRANSITION:
- case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
- UNREACHABLE();
break;
+ default:
+ UNREACHABLE();
+ return Handle<Map>::null();
}
- return Handle<Map>::null();
}
@@ -1758,54 +1739,30 @@ KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
if (allow_growth) {
// Handle growing array in stub if necessary.
- if (receiver->HasFastSmiElements()) {
+ if (receiver->HasFastSmiOnlyElements()) {
if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
- }
+ return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
}
if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
- }
+ return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
- }
+ return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
}
}
return STORE_AND_GROW_NO_TRANSITION;
} else {
// Handle only in-bounds elements accesses.
- if (receiver->HasFastSmiElements()) {
+ if (receiver->HasFastSmiOnlyElements()) {
if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_TRANSITION_SMI_TO_DOUBLE;
- }
+ return STORE_TRANSITION_SMI_TO_DOUBLE;
} else if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_TRANSITION_SMI_TO_OBJECT;
- }
+ return STORE_TRANSITION_SMI_TO_OBJECT;
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_TRANSITION_DOUBLE_TO_OBJECT;
- }
+ return STORE_TRANSITION_DOUBLE_TO_OBJECT;
}
}
return STORE_NO_TRANSITION;
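
GetStubKind now selects among four transitions instead of ten: the receiver's current kind plus the incoming value's type pick the target, with no holey distinction. A condensed sketch of that decision table (Value is a stand-in for the real handle checks):

    #include <cassert>

    enum StubKind {
      STORE_NO_TRANSITION,
      STORE_TRANSITION_SMI_TO_OBJECT,
      STORE_TRANSITION_SMI_TO_DOUBLE,
      STORE_TRANSITION_DOUBLE_TO_OBJECT
    };

    struct Value { bool is_smi, is_heap_number; };

    // Receiver kinds reduced to the two cases the hunk branches on.
    StubKind GetStubKind(bool has_smi_only, bool has_double, Value v) {
      if (has_smi_only) {
        if (v.is_heap_number) return STORE_TRANSITION_SMI_TO_DOUBLE;
        if (!v.is_smi) return STORE_TRANSITION_SMI_TO_OBJECT;
      } else if (has_double) {
        if (!v.is_smi && !v.is_heap_number) return STORE_TRANSITION_DOUBLE_TO_OBJECT;
      }
      return STORE_NO_TRANSITION;
    }

    int main() {
      assert(GetStubKind(true, false, {false, true}) == STORE_TRANSITION_SMI_TO_DOUBLE);
      assert(GetStubKind(false, true, {true, false}) == STORE_NO_TRANSITION);
    }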
@@ -2439,7 +2396,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
// Activate inlined smi code.
if (previous_type == BinaryOpIC::UNINITIALIZED) {
- PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(ic.address());
}
}
@@ -2500,14 +2457,6 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
}
-Code* CompareIC::GetRawUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
- Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code));
- return code;
-}
-
-
Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
ICCompareStub stub(op, UNINITIALIZED);
return stub.GetCode();
@@ -2522,12 +2471,6 @@ CompareIC::State CompareIC::ComputeState(Code* target) {
}
-Token::Value CompareIC::ComputeOperation(Code* target) {
- ASSERT(target->major_key() == CodeStub::CompareIC);
- return static_cast<Token::Value>(target->compare_operation());
-}
-
-
const char* CompareIC::GetStateName(State state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index c1b95494de..56625525d4 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -378,16 +378,10 @@ class KeyedIC: public IC {
STORE_TRANSITION_SMI_TO_OBJECT,
STORE_TRANSITION_SMI_TO_DOUBLE,
STORE_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
STORE_AND_GROW_NO_TRANSITION,
STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
+ STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
};
static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
@@ -800,9 +794,6 @@ class CompareIC: public IC {
// Helper function for determining the state of a compare IC.
static State ComputeState(Code* target);
- // Helper function for determining the operation a compare IC is for.
- static Token::Value ComputeOperation(Code* target);
-
static const char* GetStateName(State state);
private:
@@ -813,13 +804,7 @@ class CompareIC: public IC {
Condition GetCondition() const { return ComputeCondition(op_); }
State GetState() { return ComputeState(target()); }
- static Code* GetRawUninitialized(Token::Value op);
-
- static void Clear(Address address, Code* target);
-
Token::Value op_;
-
- friend class IC;
};
@@ -832,8 +817,7 @@ class ToBooleanIC: public IC {
// Helper for BinaryOpIC and CompareIC.
-enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
+void PatchInlinedSmiCode(Address address);
} } // namespace v8::internal
diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h
index 2dae6f207d..3e3d6c43fd 100644
--- a/deps/v8/src/incremental-marking-inl.h
+++ b/deps/v8/src/incremental-marking-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -100,7 +100,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
int64_t old_bytes_rescanned = bytes_rescanned_;
bytes_rescanned_ = old_bytes_rescanned + obj_size;
if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
- if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
+ if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
// If we have queued twice the heap size for rescanning then we are
// going around in circles, scanning the same objects again and again
// as the program mutates the heap faster than we can incrementally
@@ -118,29 +118,13 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
- Marking::WhiteToGrey(mark_bit);
+ WhiteToGrey(obj, mark_bit);
marking_deque_.PushGrey(obj);
}
-bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- if (!mark_bit.Get()) {
- WhiteToGreyAndPush(obj, mark_bit);
- return true;
- }
- return false;
-}
-
-
-bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- if (!mark_bit.Get()) {
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
- return true;
- }
- return false;
+void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
+ Marking::WhiteToGrey(mark_bit);
}
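
The split above leaves WhiteToGreyAndPush as WhiteToGrey plus a deque push. A toy tri-color sketch of that division of labor, assuming a simplified object model (a Color enum instead of real mark bits):

#include <deque>

enum Color { WHITE, GREY, BLACK };

struct Obj { Color color = WHITE; };

std::deque<Obj*> marking_deque;

// Color change only: the object is known reachable, but its children have
// not been scanned yet.
void WhiteToGrey(Obj* obj) { obj->color = GREY; }

// Color change plus scheduling: grey objects wait on the deque until the
// marker pops them, scans their fields, and turns them black.
void WhiteToGreyAndPush(Obj* obj) {
  WhiteToGrey(obj);
  marking_deque.push_back(obj);
}

int main() {
  Obj a;
  WhiteToGreyAndPush(&a);
  while (!marking_deque.empty()) {
    Obj* o = marking_deque.front();
    marking_deque.pop_front();
    o->color = BLACK;  // children would be scanned here
  }
}
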
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index 94afffa733..2413b67803 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -42,7 +42,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
state_(STOPPED),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(false),
- marker_(this, heap->mark_compact_collector()),
steps_count_(0),
steps_took_(0),
longest_step_(0.0),
@@ -664,22 +663,6 @@ void IncrementalMarking::Hurry() {
} else if (map == global_context_map) {
// Global contexts have weak fields.
VisitGlobalContext(Context::cast(obj), &marking_visitor);
- } else if (map->instance_type() == MAP_TYPE) {
- Map* map = Map::cast(obj);
- heap_->ClearCacheOnMap(map);
-
- // When map collection is enabled we have to mark through map's
- // transitions and back pointers in a special way to make these links
- // weak. Only maps for subclasses of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps &&
- map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- marker_.MarkMapContents(map);
- } else {
- marking_visitor.VisitPointers(
- HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
- }
} else {
obj->Iterate(&marking_visitor);
}
@@ -824,6 +807,12 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
Map* map = obj->map();
if (map == filler_map) continue;
+ if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
+ heap_->ClearCacheOnMap(map);
+ }
+
+
int size = obj->SizeFromMap(map);
bytes_to_process -= size;
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
@@ -841,22 +830,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
VisitGlobalContext(ctx, &marking_visitor);
- } else if (map->instance_type() == MAP_TYPE) {
- Map* map = Map::cast(obj);
- heap_->ClearCacheOnMap(map);
-
- // When map collection is enabled we have to mark through map's
- // transitions and back pointers in a special way to make these links
- // weak. Only maps for subclasses of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps &&
- map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- marker_.MarkMapContents(map);
- } else {
- marking_visitor.VisitPointers(
- HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
- }
} else if (map->instance_type() == JS_FUNCTION_TYPE) {
marking_visitor.VisitPointers(
HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
@@ -978,7 +951,7 @@ void IncrementalMarking::ResetStepCounters() {
int64_t IncrementalMarking::SpaceLeftInOldSpace() {
- return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
+ return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
}
} } // namespace v8::internal
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index 39e8daed68..8cbe6c18e7 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -154,6 +154,8 @@ class IncrementalMarking {
inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+ inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
+
// Does white->black or keeps gray or black color. Returns true if converting
// white to black.
inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
@@ -167,16 +169,6 @@ class IncrementalMarking {
return true;
}
- // Marks the object grey and pushes it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for incremental marking only.
- INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for incremental marking only.
- INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
-
inline int steps_count() {
return steps_count_;
}
@@ -268,7 +260,6 @@ class IncrementalMarking {
VirtualMemory* marking_deque_memory_;
bool marking_deque_memory_committed_;
MarkingDeque marking_deque_;
- Marker<IncrementalMarking> marker_;
int steps_count_;
double steps_took_;
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index f51b4e1a96..f1c9b3ccfa 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -965,7 +965,7 @@ class Isolate {
// SerializerDeserializer state.
static const int kPartialSnapshotCacheCapacity = 1400;
- static const int kJSRegexpStaticOffsetsVectorSize = 128;
+ static const int kJSRegexpStaticOffsetsVectorSize = 50;
Address external_callback() {
return thread_local_top_.external_callback_;
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index c5e73ed653..2e9bda1d25 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -324,7 +324,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
index)));
if (index == -1) return isolate->factory()->null_value();
}
- ASSERT(last_match_info->HasFastObjectElements());
+ ASSERT(last_match_info->HasFastElements());
{
NoHandleAllocation no_handles;
@@ -429,7 +429,6 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
RegExpEngine::CompilationResult result =
RegExpEngine::Compile(&compile_data,
flags.is_ignore_case(),
- flags.is_global(),
flags.is_multiline(),
pattern,
sample_subject,
@@ -516,23 +515,7 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
}
-int RegExpImpl::GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
- int registers_per_match,
- int* max_matches) {
-#ifdef V8_INTERPRETED_REGEXP
- // Global loop in interpreted regexp is not implemented. Therefore we choose
- // the size of the offsets vector so that it can only store one match.
- *max_matches = 1;
- return registers_per_match;
-#else // V8_INTERPRETED_REGEXP
- int size = Max(registers_per_match, OffsetsVector::kStaticOffsetsVectorSize);
- *max_matches = size / registers_per_match;
- return size;
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-int RegExpImpl::IrregexpExecRaw(
+RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
@@ -634,7 +617,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
OffsetsVector registers(required_registers, isolate);
- int res = RegExpImpl::IrregexpExecRaw(
+ IrregexpResult res = RegExpImpl::IrregexpExecOnce(
jsregexp, subject, previous_index, Vector<int>(registers.vector(),
registers.length()));
if (res == RE_SUCCESS) {
@@ -2191,12 +2174,15 @@ int ActionNode::EatsAtLeast(int still_to_find,
void ActionNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
if (type_ == BEGIN_SUBMATCH) {
bm->SetRest(offset);
} else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
- on_success()->FillInBMInfo(offset, bm, not_at_start);
+ on_success()->FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2218,11 +2204,15 @@ int AssertionNode::EatsAtLeast(int still_to_find,
}
-void AssertionNode::FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void AssertionNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
// Match the behaviour of EatsAtLeast on this node.
if (type() == AT_START && not_at_start) return;
- on_success()->FillInBMInfo(offset, bm, not_at_start);
+ on_success()->FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2803,14 +2793,20 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
-void LoopChoiceNode::FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
- if (body_can_be_zero_length_) {
+void LoopChoiceNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ if (body_can_be_zero_length_ ||
+ recursion_depth > RegExpCompiler::kMaxRecursion ||
+ budget <= 0) {
bm->SetRest(offset);
SaveBMInfo(bm, not_at_start, offset);
return;
}
- ChoiceNode::FillInBMInfo(offset, bm, not_at_start);
+ ChoiceNode::FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2912,7 +2908,7 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
new BoyerMooreLookahead(eats_at_least, compiler);
- FillInBMInfo(0, bm, not_at_start);
+ FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
}
@@ -3850,7 +3846,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
BoyerMooreLookahead* bm =
new BoyerMooreLookahead(eats_at_least, compiler);
GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(0, bm, not_at_start);
+ alt0.node()->FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
}
} else {
@@ -5589,8 +5585,11 @@ void Analysis::VisitAssertion(AssertionNode* that) {
}
-void BackReferenceNode::FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void BackReferenceNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
// Working out the set of characters that a backreference can match is too
// hard, so we just say that any character can match.
bm->SetRest(offset);
@@ -5602,9 +5601,13 @@ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
RegExpMacroAssembler::kTableSize);
-void ChoiceNode::FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void ChoiceNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
ZoneList<GuardedAlternative>* alts = alternatives();
+ budget = (budget - 1) / alts->length();
for (int i = 0; i < alts->length(); i++) {
GuardedAlternative& alt = alts->at(i);
if (alt.guards() != NULL && alt.guards()->length() != 0) {
@@ -5612,14 +5615,18 @@ void ChoiceNode::FillInBMInfo(
SaveBMInfo(bm, not_at_start, offset);
return;
}
- alt.node()->FillInBMInfo(offset, bm, not_at_start);
+ alt.node()->FillInBMInfo(
+ offset, recursion_depth + 1, budget, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
-void TextNode::FillInBMInfo(
- int initial_offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void TextNode::FillInBMInfo(int initial_offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
if (initial_offset >= bm->length()) return;
int offset = initial_offset;
int max_char = bm->max_char();
@@ -5673,6 +5680,8 @@ void TextNode::FillInBMInfo(
return;
}
on_success()->FillInBMInfo(offset,
+ recursion_depth + 1,
+ budget - 1,
bm,
true); // Not at start after a text node.
if (initial_offset == 0) set_bm_info(not_at_start, bm);
@@ -5797,7 +5806,6 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
RegExpEngine::CompilationResult RegExpEngine::Compile(
RegExpCompileData* data,
bool ignore_case,
- bool is_global,
bool is_multiline,
Handle<String> pattern,
Handle<String> sample_subject,
@@ -5901,8 +5909,6 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
macro_assembler.SetCurrentPositionFromEnd(max_length);
}
- macro_assembler.set_global(is_global);
-
return compiler.Assemble(&macro_assembler,
node,
data->capture_count,
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 2e90e9ad5a..6b16dd34db 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -109,22 +109,16 @@ class RegExpImpl {
static int IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject);
- // Calculate the size of offsets vector for the case of global regexp
- // and the number of matches this vector is able to store.
- static int GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
- int registers_per_match,
- int* max_matches);
-
- // Execute a regular expression on the subject, starting from index.
- // If matching succeeds, return the number of matches. This can be larger
- // than one in the case of global regular expressions.
- // The captures and subcaptures are stored into the registers vector.
+ // Execute a regular expression once on the subject, starting from
+ // character "index".
+  // If successful, returns RE_SUCCESS and sets the capture positions
+ // in the first registers.
// If matching fails, returns RE_FAILURE.
// If execution fails, sets a pending exception and returns RE_EXCEPTION.
- static int IrregexpExecRaw(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Vector<int> registers);
+ static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ Vector<int> registers);
// Execute an Irregexp bytecode pattern.
// On a successful match, the result is a JSArray containing
@@ -580,9 +574,14 @@ class RegExpNode: public ZoneObject {
// Collects information on the possible code units (mod 128) that can match if
// we look forward. This is used for a Boyer-Moore-like string searching
// implementation. TODO(erikcorry): This should share more code with
- // EatsAtLeast, GetQuickCheckDetails.
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
+ // the number of nodes we are willing to look at in order to create this data.
+ static const int kFillInBMBudget = 200;
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
UNREACHABLE();
}
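
kFillInBMBudget bounds how much of the node graph this analysis visits: a straight-line successor costs one unit, a choice node divides what is left among its alternatives, and exhaustion falls back to SetRest. A minimal recursion sketch, assuming a toy Node type and a kMaxRecursion of 100 (the real constant lives on RegExpCompiler and may differ):

#include <vector>

struct Node {
  std::vector<Node*> alternatives;  // empty for straight-line nodes
  Node* on_success = nullptr;
  bool rest_set = false;
};

const int kMaxRecursion = 100;   // assumed value for the sketch
const int kFillInBMBudget = 200;

void FillInBMInfo(Node* node, int depth, int budget) {
  // Out of budget or too deep: give up precision and match anything.
  if (depth > kMaxRecursion || budget <= 0) {
    node->rest_set = true;
    return;
  }
  if (!node->alternatives.empty()) {
    // A choice node splits its remaining budget across the alternatives,
    // so wide alternations cannot blow up the analysis.
    int share = (budget - 1) / static_cast<int>(node->alternatives.size());
    for (Node* alt : node->alternatives)
      FillInBMInfo(alt, depth + 1, share);
  } else if (node->on_success != nullptr) {
    FillInBMInfo(node->on_success, depth + 1, budget - 1);
  }
}

int main() {
  Node leaf, choice;
  choice.alternatives = {&leaf, &leaf};
  FillInBMInfo(&choice, 0, kFillInBMBudget);
}
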
@@ -681,9 +680,13 @@ class SeqRegExpNode: public RegExpNode {
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
virtual RegExpNode* FilterASCII(int depth);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
- on_success_->FillInBMInfo(offset, bm, not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ on_success_->FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
@@ -736,8 +739,11 @@ class ActionNode: public SeqRegExpNode {
return on_success()->GetQuickCheckDetails(
details, compiler, filled_in, not_at_start);
}
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
Type type() { return type_; }
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
@@ -805,8 +811,11 @@ class TextNode: public SeqRegExpNode {
virtual int GreedyLoopTextLength();
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
void CalculateOffsets();
virtual RegExpNode* FilterASCII(int depth);
@@ -865,8 +874,11 @@ class AssertionNode: public SeqRegExpNode {
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
AssertionNodeType type() { return type_; }
void set_type(AssertionNodeType type) { type_ = type; }
@@ -903,8 +915,11 @@ class BackReferenceNode: public SeqRegExpNode {
bool not_at_start) {
return;
}
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
private:
int start_reg_;
@@ -928,8 +943,11 @@ class EndNode: public RegExpNode {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
@@ -1018,8 +1036,11 @@ class ChoiceNode: public RegExpNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
bool being_calculated() { return being_calculated_; }
bool not_at_start() { return not_at_start_; }
@@ -1068,9 +1089,13 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
- alternatives_->at(1).node()->FillInBMInfo(offset, bm, not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ alternatives_->at(1).node()->FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
// For a negative lookahead we don't emit the quick check for the
@@ -1100,8 +1125,11 @@ class LoopChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
@@ -1551,7 +1579,6 @@ class RegExpEngine: public AllStatic {
static CompilationResult Compile(RegExpCompileData* input,
bool ignore_case,
- bool global,
bool multiline,
Handle<String> pattern,
Handle<String> sample_subject,
@@ -1580,8 +1607,7 @@ class OffsetsVector {
inline int* vector() { return vector_; }
inline int length() { return offsets_vector_length_; }
- static const int kStaticOffsetsVectorSize =
- Isolate::kJSRegexpStaticOffsetsVectorSize;
+ static const int kStaticOffsetsVectorSize = 50;
private:
static Address static_offsets_vector_address(Isolate* isolate) {
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 6cf3badc62..35ee3f5e8b 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -137,14 +137,6 @@ bool List<T, P>::RemoveElement(const T& elm) {
template<typename T, class P>
-void List<T, P>::Allocate(int length) {
- DeleteData(data_);
- Initialize(length);
- length_ = length;
-}
-
-
-template<typename T, class P>
void List<T, P>::Clear() {
DeleteData(data_);
Initialize(0);
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 7350c0d44e..a210dfb1b8 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -117,9 +117,6 @@ class List {
// pointer type. Returns the removed element.
INLINE(T RemoveLast()) { return Remove(length_ - 1); }
- // Deletes current list contents and allocates space for 'length' elements.
- INLINE(void Allocate(int length));
-
// Clears the list by setting the length to zero. Even if T is a
// pointer type, clearing the list doesn't delete the entries.
INLINE(void Clear());
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 4ee2a7aa5e..c41cce8d9d 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -225,12 +225,9 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
return 2;
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
return 3;
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
return kPointerSizeLog2;
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 22b82501e9..9c5294a26d 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,6 @@
#include "liveedit.h"
-#include "code-stubs.h"
#include "compilation-cache.h"
#include "compiler.h"
#include "debug.h"
@@ -1476,36 +1475,26 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Check the nature of the top frame.
Isolate* isolate = Isolate::Current();
Code* pre_top_frame_code = pre_top_frame->LookupCode();
- bool frame_has_padding;
if (pre_top_frame_code->is_inline_cache_stub() &&
pre_top_frame_code->ic_state() == DEBUG_BREAK) {
// OK, we can drop inline cache calls.
*mode = Debug::FRAME_DROPPED_IN_IC_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
} else if (pre_top_frame_code ==
isolate->debug()->debug_break_slot()) {
// OK, we can drop debug break slot.
*mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
} else if (pre_top_frame_code ==
isolate->builtins()->builtin(
Builtins::kFrameDropper_LiveEdit)) {
// OK, we can drop our own code.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
- frame_has_padding = false;
} else if (pre_top_frame_code ==
isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
*mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
- frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
} else if (pre_top_frame_code->kind() == Code::STUB &&
- pre_top_frame_code->major_key() == CodeStub::CEntry) {
- // Entry from our unit tests on 'debugger' statement.
- // It's fine, we support this case.
+ pre_top_frame_code->major_key()) {
+    // Entry from our unit tests; it's fine, we support this case.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
- // We don't have a padding from 'debugger' statement call.
- // Here the stub is CEntry, it's not debug-only and can't be padded.
- // If anyone would complain, a proxy padded stub could be added.
- frame_has_padding = false;
} else {
return "Unknown structure of stack above changing function";
}
@@ -1515,49 +1504,8 @@ static const char* DropFrames(Vector<StackFrame*> frames,
- Debug::kFrameDropperFrameSize * kPointerSize // Size of the new frame.
+ kPointerSize; // Bigger address end is exclusive.
- Address* top_frame_pc_address = top_frame->pc_address();
-
-  // top_frame may be damaged below this point. Do not use it.
- ASSERT(!(top_frame = NULL));
-
if (unused_stack_top > unused_stack_bottom) {
- if (frame_has_padding) {
- int shortage_bytes =
- static_cast<int>(unused_stack_top - unused_stack_bottom);
-
- Address padding_start = pre_top_frame->fp() -
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize;
-
- Address padding_pointer = padding_start;
- Smi* padding_object =
- Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue);
- while (Memory::Object_at(padding_pointer) == padding_object) {
- padding_pointer -= kPointerSize;
- }
- int padding_counter =
- Smi::cast(Memory::Object_at(padding_pointer))->value();
- if (padding_counter * kPointerSize < shortage_bytes) {
- return "Not enough space for frame dropper frame "
- "(even with padding frame)";
- }
- Memory::Object_at(padding_pointer) =
- Smi::FromInt(padding_counter - shortage_bytes / kPointerSize);
-
- StackFrame* pre_pre_frame = frames[top_frame_index - 2];
-
- memmove(padding_start + kPointerSize - shortage_bytes,
- padding_start + kPointerSize,
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
-
- pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
- pre_pre_frame->SetCallerFp(pre_top_frame->fp());
- unused_stack_top -= shortage_bytes;
-
- STATIC_ASSERT(sizeof(Address) == kPointerSize);
- top_frame_pc_address -= shortage_bytes / kPointerSize;
- } else {
- return "Not enough space for frame dropper frame";
- }
+ return "Not enough space for frame dropper frame";
}
// Committing now. After this point we should return only NULL value.
@@ -1567,7 +1515,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
- *top_frame_pc_address = code->entry();
+ top_frame->set_pc(code->entry());
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
*restarter_frame_function_pointer =
diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h
index 2f7e31fea5..43f6b8986f 100644
--- a/deps/v8/src/mark-compact-inl.h
+++ b/deps/v8/src/mark-compact-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -52,15 +52,6 @@ void MarkCompactCollector::SetFlags(int flags) {
}
-bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) {
- if (MarkObjectWithoutPush(obj)) {
- marking_deque_.PushBlack(obj);
- return true;
- }
- return false;
-}
-
-
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
if (!mark_bit.Get()) {
@@ -71,13 +62,16 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
}
-bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- if (!mark_bit.Get()) {
- SetMark(obj, mark_bit);
- return true;
- }
- return false;
+bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
+ MarkBit mark = Marking::MarkBitFrom(object);
+ bool old_mark = mark.Get();
+ if (!old_mark) SetMark(object, mark);
+ return old_mark;
+}
+
+
+void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
+ if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
}
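
Note the contract of the reintroduced helpers: MarkObjectWithoutPush returns the old mark bit, so MarkObjectAndPush enqueues an object only when this call freshly marked it. A minimal sketch of that protocol with hypothetical types:

#include <cassert>
#include <deque>

struct Obj { bool marked = false; };
std::deque<Obj*> marking_deque;

// Returns the previous mark: true means someone already marked the object.
bool MarkObjectWithoutPush(Obj* object) {
  bool old_mark = object->marked;
  if (!old_mark) object->marked = true;
  return old_mark;
}

// Push for scanning only if this call did the marking, so each object is
// queued at most once.
void MarkObjectAndPush(Obj* object) {
  if (!MarkObjectWithoutPush(object)) marking_deque.push_back(object);
}

int main() {
  Obj o;
  MarkObjectAndPush(&o);
  MarkObjectAndPush(&o);           // second call is a no-op
  assert(marking_deque.size() == 1);
}
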
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 69542685a2..507ad84090 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -64,13 +64,13 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
abort_incremental_marking_(false),
compacting_(false),
was_marked_incrementally_(false),
+ collect_maps_(FLAG_collect_maps),
flush_monomorphic_ics_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
- encountered_weak_maps_(NULL),
- marker_(this, this) { }
+ encountered_weak_maps_(NULL) { }
#ifdef DEBUG
@@ -282,7 +282,7 @@ void MarkCompactCollector::CollectGarbage() {
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
- if (FLAG_collect_maps) ClearNonLiveTransitions();
+ if (collect_maps_) ClearNonLiveTransitions();
ClearWeakMaps();
@@ -294,7 +294,7 @@ void MarkCompactCollector::CollectGarbage() {
SweepSpaces();
- if (!FLAG_collect_maps) ReattachInitialMaps();
+ if (!collect_maps_) ReattachInitialMaps();
Finish();
@@ -658,6 +658,11 @@ void MarkCompactCollector::AbortCompaction() {
void MarkCompactCollector::Prepare(GCTracer* tracer) {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+ // Disable collection of maps if incremental marking is enabled.
+  // The map collection algorithm relies on a special map transition tree
+  // traversal order which is not implemented for incremental marking.
+ collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
+
// Monomorphic ICs are preserved when possible, but need to be flushed
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
@@ -675,6 +680,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
+ if (collect_maps_) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit) {
// If GDBJIT interface is active disable compaction.
@@ -1180,7 +1186,16 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Heap* heap = map->GetHeap();
Code* code = reinterpret_cast<Code*>(object);
if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackCells(heap);
+ Object* raw_info = code->type_feedback_info();
+ if (raw_info->IsTypeFeedbackInfo()) {
+ TypeFeedbackCells* type_feedback_cells =
+ TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
+ for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
+ ASSERT(type_feedback_cells->AstId(i)->IsSmi());
+ JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
+ cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+ }
+ }
}
code->CodeIterateBody<StaticMarkingVisitor>(heap);
}
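
The hunk above inlines what a ClearTypeFeedbackCells helper did: under --cleanup-code-caches-at-gc, every feedback cell is reset to the uninitialized sentinel. A simplified sketch with stand-in types (the sentinel here is just a static object, not the real heap value):

#include <vector>

struct Cell { const void* value = nullptr; };

// Stand-in for TypeFeedbackCells::RawUninitializedSentinel(heap).
const void* UninitializedSentinel() {
  static const int sentinel = 0;
  return &sentinel;
}

// Resetting every cell at GC time keeps stale type-feedback pointers from
// holding otherwise dead objects alive.
void ClearTypeFeedbackCells(std::vector<Cell>& cells) {
  for (Cell& cell : cells) cell.value = UninitializedSentinel();
}

int main() {
  std::vector<Cell> cells(3);
  ClearTypeFeedbackCells(cells);
  return cells[0].value == UninitializedSentinel() ? 0 : 1;
}
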
@@ -1793,11 +1808,11 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
- // in a special way to make transition links weak. Only maps for subclasses
- // of JSReceiver can have transitions.
+ // in a special way to make transition links weak.
+ // Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- marker_.MarkMapContents(map);
+ if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ MarkMapContents(map);
} else {
marking_deque_.PushBlack(map);
}
@@ -1807,86 +1822,79 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
}
-// Force instantiation of template instances.
-template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
-template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
-
-
-template <class T>
-void Marker<T>::MarkMapContents(Map* map) {
+void MarkCompactCollector::MarkMapContents(Map* map) {
// Mark prototype transitions array but don't push it into marking stack.
// This will make references from it weak. We will clean dead prototype
// transitions in ClearNonLiveTransitions.
- Object** proto_trans_slot =
- HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
- HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
- if (prototype_transitions->IsFixedArray()) {
- mark_compact_collector()->RecordSlot(proto_trans_slot,
- proto_trans_slot,
- prototype_transitions);
- MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
- if (!mark.Get()) {
- mark.Set();
- MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
- prototype_transitions->Size());
- }
+ FixedArray* prototype_transitions = map->prototype_transitions();
+ MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+ if (!mark.Get()) {
+ mark.Set();
+ MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
+ prototype_transitions->Size());
}
- // Make sure that the back pointer stored either in the map itself or inside
- // its prototype transitions array is marked. Treat pointers in the descriptor
- // array as weak and also mark that array to prevent visiting it later.
- base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
-
- Object** descriptor_array_slot =
+ Object** raw_descriptor_array_slot =
HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
- Object* descriptor_array = *descriptor_array_slot;
- if (!descriptor_array->IsSmi()) {
- MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
- }
-
- // Mark the Object* fields of the Map. Since the descriptor array has been
- // marked already, it is fine that one of these fields contains a pointer
- // to it. But make sure to skip back pointer and prototype transitions.
- STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
- Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
- Object** start_slot = HeapObject::RawField(
- map, Map::kPointerFieldsBeginOffset);
- Object** end_slot = HeapObject::RawField(
- map, Map::kPrototypeTransitionsOrBackPointerOffset);
- for (Object** slot = start_slot; slot < end_slot; slot++) {
- Object* obj = *slot;
- if (!obj->NonFailureIsHeapObject()) continue;
- mark_compact_collector()->RecordSlot(start_slot, slot, obj);
- base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
+ Object* raw_descriptor_array = *raw_descriptor_array_slot;
+ if (!raw_descriptor_array->IsSmi()) {
+ MarkDescriptorArray(
+ reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
}
+
+ // Mark the Object* fields of the Map.
+ // Since the descriptor array has been marked already, it is fine
+ // that one of these fields contains a pointer to it.
+ Object** start_slot = HeapObject::RawField(map,
+ Map::kPointerFieldsBeginOffset);
+
+ Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
+
+ StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
+}
+
+
+void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
+ int offset) {
+ Object** slot = HeapObject::RawField(accessors, offset);
+ HeapObject* accessor = HeapObject::cast(*slot);
+ if (accessor->IsMap()) return;
+ RecordSlot(slot, slot, accessor);
+ MarkObjectAndPush(accessor);
}
-template <class T>
-void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
+void MarkCompactCollector::MarkDescriptorArray(
+ DescriptorArray* descriptors) {
+ MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
+ if (descriptors_mark.Get()) return;
// Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
+ ASSERT(descriptors != heap()->empty_descriptor_array());
+ SetMark(descriptors, descriptors_mark);
- // The DescriptorArray contains a pointer to its contents array, but the
- // contents array will be marked black and hence not be visited again.
- if (!base_marker()->MarkObjectAndPush(descriptors)) return;
- FixedArray* contents = FixedArray::cast(
+ FixedArray* contents = reinterpret_cast<FixedArray*>(
descriptors->get(DescriptorArray::kContentArrayIndex));
+ ASSERT(contents->IsHeapObject());
+ ASSERT(!IsMarked(contents));
+ ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
- ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
- base_marker()->MarkObjectWithoutPush(contents);
-
- // Contents contains (value, details) pairs. If the descriptor contains a
- // transition (value is a Map), we don't mark the value as live. It might
- // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
+ MarkBit contents_mark = Marking::MarkBitFrom(contents);
+ SetMark(contents, contents_mark);
+ // Contents contains (value, details) pairs. If the details say that the type
+ // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
+ // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
+ // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
+ // CONSTANT_TRANSITION is the value an Object* (a Map*).
for (int i = 0; i < contents->length(); i += 2) {
+ // If the pair (value, details) at index i, i+1 is not
+ // a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
Object** slot = contents->data_start() + i;
if (!(*slot)->IsHeapObject()) continue;
HeapObject* value = HeapObject::cast(*slot);
- mark_compact_collector()->RecordSlot(slot, slot, *slot);
+ RecordSlot(slot, slot, *slot);
switch (details.type()) {
case NORMAL:
@@ -1894,22 +1902,21 @@ void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
case CONSTANT_FUNCTION:
case HANDLER:
case INTERCEPTOR:
- base_marker()->MarkObjectAndPush(value);
+ MarkObjectAndPush(value);
break;
case CALLBACKS:
if (!value->IsAccessorPair()) {
- base_marker()->MarkObjectAndPush(value);
- } else if (base_marker()->MarkObjectWithoutPush(value)) {
- AccessorPair* accessors = AccessorPair::cast(value);
- MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
- MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
+ MarkObjectAndPush(value);
+ } else if (!MarkObjectWithoutPush(value)) {
+ MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
+ MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
}
break;
case ELEMENTS_TRANSITION:
// For maps with multiple elements transitions, the transition maps are
// stored in a FixedArray. Keep the fixed array alive but not the maps
// that it refers to.
- if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
+ if (value->IsFixedArray()) MarkObjectWithoutPush(value);
break;
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
@@ -1917,16 +1924,26 @@ void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
break;
}
}
+ // The DescriptorArray descriptors contains a pointer to its contents array,
+ // but the contents array is already marked.
+ marking_deque_.PushBlack(descriptors);
}
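
The loop above deliberately leaves transition values unmarked so that dead transition targets can be nulled later. A stripped-down sketch of that weak treatment, assuming a toy descriptor layout in place of the real (value, details) pairs:

#include <vector>

enum DescriptorType { NORMAL, MAP_TRANSITION, NULL_DESCRIPTOR };

struct Value { bool marked = false; };
struct Descriptor { Value* value; DescriptorType type; };

// Values reached only through transitions stay white on purpose: if nothing
// else keeps them alive they are dead, and the transition is cleared later.
void MarkDescriptorValues(const std::vector<Descriptor>& contents) {
  for (const Descriptor& d : contents) {
    switch (d.type) {
      case NORMAL:
        d.value->marked = true;  // strong: ordinary property value
        break;
      case MAP_TRANSITION:
      case NULL_DESCRIPTOR:
        break;                   // weak: do not mark
    }
  }
}

int main() {
  Value strong, weak;
  std::vector<Descriptor> contents = {{&strong, NORMAL},
                                      {&weak, MAP_TRANSITION}};
  MarkDescriptorValues(contents);
  return (strong.marked && !weak.marked) ? 0 : 1;
}
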
-template <class T>
-void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
- Object** slot = HeapObject::RawField(accessors, offset);
- HeapObject* accessor = HeapObject::cast(*slot);
- if (accessor->IsMap()) return;
- mark_compact_collector()->RecordSlot(slot, slot, accessor);
- base_marker()->MarkObjectAndPush(accessor);
+void MarkCompactCollector::CreateBackPointers() {
+ HeapObjectIterator iterator(heap()->map_space());
+ for (HeapObject* next_object = iterator.Next();
+ next_object != NULL; next_object = iterator.Next()) {
+ if (next_object->IsMap()) { // Could also be FreeSpace object on free list.
+ Map* map = Map::cast(next_object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ map->CreateBackPointers();
+ } else {
+ ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
+ }
+ }
+ }
}
@@ -2453,8 +2470,15 @@ void MarkCompactCollector::ReattachInitialMaps() {
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
- // a marked map to an unmarked map to null transitions. This action
- // is carried out only on maps of JSObjects and related subtypes.
+ // a marked map to an unmarked map to null transitions. At the same time,
+ // set all the prototype fields of maps back to their original value,
+ // dropping the back pointers temporarily stored in the prototype field.
+ // Setting the prototype field requires following the linked list of
+ // back pointers, reversing them all at once. This allows us to find
+ // those maps with map transitions that need to be nulled, and only
+ // scan the descriptor arrays of those maps, not all maps.
+ // All of these actions are carried out only on maps of JSObjects
+ // and related subtypes.
for (HeapObject* obj = map_iterator.Next();
obj != NULL; obj = map_iterator.Next()) {
Map* map = reinterpret_cast<Map*>(obj);
@@ -2530,16 +2554,36 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
MarkBit map_mark) {
- Object* potential_parent = map->GetBackPointer();
- if (!potential_parent->IsMap()) return;
- Map* parent = Map::cast(potential_parent);
+ // Follow the chain of back pointers to find the prototype.
+ Object* real_prototype = map;
+ while (real_prototype->IsMap()) {
+ real_prototype = Map::cast(real_prototype)->prototype();
+ ASSERT(real_prototype->IsHeapObject());
+ }
- // Follow back pointer, check whether we are dealing with a map transition
- // from a live map to a dead path and in case clear transitions of parent.
+ // Follow back pointers, setting them to prototype, clearing map transitions
+ // when necessary.
+ Map* current = map;
bool current_is_alive = map_mark.Get();
- bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
- if (!current_is_alive && parent_is_alive) {
- parent->ClearNonLiveTransitions(heap());
+ bool on_dead_path = !current_is_alive;
+ while (current->IsMap()) {
+ Object* next = current->prototype();
+ // There should never be a dead map above a live map.
+ ASSERT(on_dead_path || current_is_alive);
+
+ // A live map above a dead map indicates a dead transition. This test will
+ // always be false on the first iteration.
+ if (on_dead_path && current_is_alive) {
+ on_dead_path = false;
+ current->ClearNonLiveTransitions(heap(), real_prototype);
+ }
+
+ Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
+ *slot = real_prototype;
+ if (current_is_alive) RecordSlot(slot, slot, real_prototype);
+
+ current = reinterpret_cast<Map*>(next);
+ current_is_alive = Marking::MarkBitFrom(current).Get();
}
}
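
The restored logic walks the back-pointer chain that CreateBackPointers threaded through the prototype slots: once to the root to recover the shared prototype, then again to clear dead transitions and put the prototype back. A self-contained sketch under simplified assumptions (separate fields and a live flag instead of the real overloaded slot and mark bits):

struct Map {
  Map* back_pointer = nullptr;   // stored in the prototype slot during GC
  void* prototype = nullptr;     // restored by the walk below
  bool live = false;
  bool transitions_cleared = false;
};

void ClearNonLiveMapTransitions(Map* map) {
  // Phase 1: follow back pointers to the root to recover the prototype
  // shared by every map in this transition tree.
  Map* root = map;
  while (root->back_pointer != nullptr) root = root->back_pointer;
  void* real_prototype = root->prototype;

  // Phase 2: walk the chain again; the first live map above a dead map
  // owns a dead transition, and every prototype slot is restored.
  Map* current = map;
  bool on_dead_path = !current->live;
  while (current != nullptr) {
    Map* next = current->back_pointer;
    if (on_dead_path && current->live) {
      on_dead_path = false;
      current->transitions_cleared = true;  // stands in for the real clearing
    }
    current->back_pointer = nullptr;
    current->prototype = real_prototype;
    current = next;
  }
}

int main() {
  int proto;  // stands in for the shared prototype object
  Map grandparent, parent, child;
  grandparent.prototype = &proto;
  parent.back_pointer = &grandparent;
  child.back_pointer = &parent;
  grandparent.live = parent.live = true;  // the child map is dead
  ClearNonLiveMapTransitions(&child);
  return (parent.transitions_cleared && child.prototype == &proto) ? 0 : 1;
}
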
@@ -2738,9 +2782,7 @@ static void UpdatePointer(HeapObject** p, HeapObject* object) {
// We have to zap this pointer, because the store buffer may overflow later,
// and then we have to scan the entire heap and we don't want to find
// spurious newspace pointers in the old space.
- // TODO(mstarzinger): This was changed to a sentinel value to track down
- // rare crashes, change it back to Smi::FromInt(0) later.
- *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
+ *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
}
}
@@ -3796,7 +3838,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
bool lazy_sweeping_active = false;
bool unused_page_present = false;
- intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
+ intptr_t old_space_size = heap()->PromotedSpaceSize();
intptr_t space_left =
Min(heap()->OldGenPromotionLimit(old_space_size),
heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index dbc28697f0..f8488bb00d 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -42,7 +42,6 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Forward declarations.
class CodeFlusher;
class GCTracer;
-class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
@@ -167,6 +166,7 @@ class Marking {
// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
+
class MarkingDeque {
public:
MarkingDeque()
@@ -383,34 +383,6 @@ class SlotsBuffer {
};
-// -------------------------------------------------------------------------
-// Marker shared between incremental and non-incremental marking
-template<class BaseMarker> class Marker {
- public:
- Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
- : base_marker_(base_marker),
- mark_compact_collector_(mark_compact_collector) {}
-
- // Mark pointers in a Map and its DescriptorArray together, possibly
- // treating transitions or back pointers weak.
- void MarkMapContents(Map* map);
- void MarkDescriptorArray(DescriptorArray* descriptors);
- void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
-
- private:
- BaseMarker* base_marker() {
- return base_marker_;
- }
-
- MarkCompactCollector* mark_compact_collector() {
- return mark_compact_collector_;
- }
-
- BaseMarker* base_marker_;
- MarkCompactCollector* mark_compact_collector_;
-};
-
-
// Defined in isolate.h.
class ThreadLocalTop;
@@ -612,6 +584,8 @@ class MarkCompactCollector {
bool was_marked_incrementally_;
+ bool collect_maps_;
+
bool flush_monomorphic_ics_;
// A pointer to the current stack-allocated GC tracer object during a full
@@ -634,13 +608,12 @@ class MarkCompactCollector {
//
// After: Live objects are marked and non-live objects are unmarked.
+
friend class RootMarkingVisitor;
friend class MarkingVisitor;
friend class StaticMarkingVisitor;
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
- friend class Marker<IncrementalMarking>;
- friend class Marker<MarkCompactCollector>;
// Mark non-optimize code for functions inlined into the given optimized
// code. This will prevent it from being flushed.
@@ -658,25 +631,29 @@ class MarkCompactCollector {
void AfterMarking();
// Marks the object black and pushes it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for non-incremental marking only.
- INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
- // Marks the object black and pushes it on the marking stack.
- // This is for non-incremental marking only.
+ // This is for non-incremental marking.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for non-incremental marking only.
- INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
+ INLINE(bool MarkObjectWithoutPush(HeapObject* object));
+ INLINE(void MarkObjectAndPush(HeapObject* value));
- // Marks the object black assuming that it is not yet marked.
- // This is for non-incremental marking only.
+ // Marks the object black. This is for non-incremental marking.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
void ProcessNewlyMarkedObject(HeapObject* obj);
+  // Creates back pointers for all map transitions and stores them in
+ // the prototype field. The original prototype pointers are restored
+ // in ClearNonLiveTransitions(). All JSObject maps
+ // connected by map transitions have the same prototype object, which
+ // is why we can use this field temporarily for back pointers.
+ void CreateBackPointers();
+
+ // Mark a Map and its DescriptorArray together, skipping transitions.
+ void MarkMapContents(Map* map);
+ void MarkAccessorPairSlot(HeapObject* accessors, int offset);
+ void MarkDescriptorArray(DescriptorArray* descriptors);
+
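
CreateBackPointers can borrow the prototype slot because every map in one transition tree shares the same prototype object. A minimal sketch of the threading step, with a hypothetical Map type that keeps an explicit child list:

#include <vector>

struct Map {
  std::vector<Map*> transitions;            // child maps reachable by transitions
  Map* prototype_or_back_pointer = nullptr;
};

// A child's prototype slot temporarily points back at its parent; the
// shared prototype is recovered from the tree root afterwards.
void CreateBackPointers(Map* map) {
  for (Map* child : map->transitions) {
    child->prototype_or_back_pointer = map;
    CreateBackPointers(child);
  }
}

int main() {
  Map root, a, b;
  root.transitions = {&a, &b};
  CreateBackPointers(&root);
  return (a.prototype_or_back_pointer == &root) ? 0 : 1;
}
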
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootMarkingVisitor* visitor);
@@ -779,7 +756,6 @@ class MarkCompactCollector {
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_maps_;
- Marker<MarkCompactCollector> marker_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 2a00ba8469..a3adcf8634 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -61,21 +61,18 @@ function FormatString(format, message) {
// To check if something is a native error we need to check the
-// concrete native error types. It is not sufficient to use instanceof
-// since it possible to create an object that has Error.prototype on
-// its prototype chain. This is the case for DOMException for example.
+// concrete native error types. It is not enough to check "obj
+// instanceof $Error" because user code can replace
+// NativeError.prototype.__proto__. User code cannot replace
+// NativeError.prototype, though, and therefore this is a safe test.
function IsNativeErrorObject(obj) {
- switch (%_ClassOf(obj)) {
- case 'Error':
- case 'EvalError':
- case 'RangeError':
- case 'ReferenceError':
- case 'SyntaxError':
- case 'TypeError':
- case 'URIError':
- return true;
- }
- return false;
+ return (obj instanceof $Error) ||
+ (obj instanceof $EvalError) ||
+ (obj instanceof $RangeError) ||
+ (obj instanceof $ReferenceError) ||
+ (obj instanceof $SyntaxError) ||
+ (obj instanceof $TypeError) ||
+ (obj instanceof $URIError);
}
@@ -748,7 +745,7 @@ function GetPositionInLine(message) {
function GetStackTraceLine(recv, fun, pos, isGlobal) {
- return new CallSite(recv, fun, pos).toString();
+ return FormatSourcePosition(new CallSite(recv, fun, pos));
}
// ----------------------------------------------------------------------------
@@ -788,7 +785,15 @@ function CallSiteGetThis() {
}
function CallSiteGetTypeName() {
- return GetTypeName(this, false);
+ var constructor = this.receiver.constructor;
+ if (!constructor) {
+ return %_CallFunction(this.receiver, ObjectToString);
+ }
+ var constructorName = constructor.name;
+ if (!constructorName) {
+ return %_CallFunction(this.receiver, ObjectToString);
+ }
+ return constructorName;
}
function CallSiteIsToplevel() {
@@ -822,10 +827,8 @@ function CallSiteGetFunctionName() {
var name = this.fun.name;
if (name) {
return name;
- }
- name = %FunctionGetInferredName(this.fun);
- if (name) {
- return name;
+ } else {
+ return %FunctionGetInferredName(this.fun);
}
// Maybe this is an evaluation?
var script = %FunctionGetScript(this.fun);
@@ -916,69 +919,6 @@ function CallSiteIsConstructor() {
return this.fun === constructor;
}
-function CallSiteToString() {
- var fileName;
- var fileLocation = "";
- if (this.isNative()) {
- fileLocation = "native";
- } else if (this.isEval()) {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName) {
- fileLocation = this.getEvalOrigin();
- }
- } else {
- fileName = this.getFileName();
- }
-
- if (fileName) {
- fileLocation += fileName;
- var lineNumber = this.getLineNumber();
- if (lineNumber != null) {
- fileLocation += ":" + lineNumber;
- var columnNumber = this.getColumnNumber();
- if (columnNumber) {
- fileLocation += ":" + columnNumber;
- }
- }
- }
-
- if (!fileLocation) {
- fileLocation = "unknown source";
- }
- var line = "";
- var functionName = this.getFunctionName();
- var addSuffix = true;
- var isConstructor = this.isConstructor();
- var isMethodCall = !(this.isToplevel() || isConstructor);
- if (isMethodCall) {
- var typeName = GetTypeName(this, true);
- var methodName = this.getMethodName();
- if (functionName) {
- if (typeName && functionName.indexOf(typeName) != 0) {
- line += typeName + ".";
- }
- line += functionName;
- if (methodName && functionName.lastIndexOf("." + methodName) !=
- functionName.length - methodName.length - 1) {
- line += " [as " + methodName + "]";
- }
- } else {
- line += typeName + "." + (methodName || "<anonymous>");
- }
- } else if (isConstructor) {
- line += "new " + (functionName || "<anonymous>");
- } else if (functionName) {
- line += functionName;
- } else {
- line += fileLocation;
- addSuffix = false;
- }
- if (addSuffix) {
- line += " (" + fileLocation + ")";
- }
- return line;
-}
-
SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
"getThis", CallSiteGetThis,
"getTypeName", CallSiteGetTypeName,
@@ -994,8 +934,7 @@ SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
"getColumnNumber", CallSiteGetColumnNumber,
"isNative", CallSiteIsNative,
"getPosition", CallSiteGetPosition,
- "isConstructor", CallSiteIsConstructor,
- "toString", CallSiteToString
+ "isConstructor", CallSiteIsConstructor
));
@@ -1037,6 +976,65 @@ function FormatEvalOrigin(script) {
return eval_origin;
}
+function FormatSourcePosition(frame) {
+ var fileName;
+ var fileLocation = "";
+ if (frame.isNative()) {
+ fileLocation = "native";
+ } else if (frame.isEval()) {
+ fileName = frame.getScriptNameOrSourceURL();
+ if (!fileName) {
+ fileLocation = frame.getEvalOrigin();
+ }
+ } else {
+ fileName = frame.getFileName();
+ }
+
+ if (fileName) {
+ fileLocation += fileName;
+ var lineNumber = frame.getLineNumber();
+ if (lineNumber != null) {
+ fileLocation += ":" + lineNumber;
+ var columnNumber = frame.getColumnNumber();
+ if (columnNumber) {
+ fileLocation += ":" + columnNumber;
+ }
+ }
+ }
+
+ if (!fileLocation) {
+ fileLocation = "unknown source";
+ }
+ var line = "";
+ var functionName = frame.getFunction().name;
+ var addPrefix = true;
+ var isConstructor = frame.isConstructor();
+ var isMethodCall = !(frame.isToplevel() || isConstructor);
+ if (isMethodCall) {
+ var methodName = frame.getMethodName();
+ line += frame.getTypeName() + ".";
+ if (functionName) {
+ line += functionName;
+ if (methodName && (methodName != functionName)) {
+ line += " [as " + methodName + "]";
+ }
+ } else {
+ line += methodName || "<anonymous>";
+ }
+ } else if (isConstructor) {
+ line += "new " + (functionName || "<anonymous>");
+ } else if (functionName) {
+ line += functionName;
+ } else {
+ line += fileLocation;
+ addPrefix = false;
+ }
+ if (addPrefix) {
+ line += " (" + fileLocation + ")";
+ }
+ return line;
+}
+
function FormatStackTrace(error, frames) {
var lines = [];
try {
@@ -1052,7 +1050,7 @@ function FormatStackTrace(error, frames) {
var frame = frames[i];
var line;
try {
- line = frame.toString();
+ line = FormatSourcePosition(frame);
} catch (e) {
try {
line = "<error: " + e + ">";
@@ -1083,19 +1081,6 @@ function FormatRawStackTrace(error, raw_stack) {
}
}
-function GetTypeName(obj, requireConstructor) {
- var constructor = obj.receiver.constructor;
- if (!constructor) {
- return requireConstructor ? null :
- %_CallFunction(obj.receiver, ObjectToString);
- }
- var constructorName = constructor.name;
- if (!constructorName) {
- return requireConstructor ? null :
- %_CallFunction(obj.receiver, ObjectToString);
- }
- return constructorName;
-}
function captureStackTrace(obj, cons_opt) {
var stackTraceLimit = $Error.stackTraceLimit;
@@ -1140,7 +1125,13 @@ function SetUpError() {
}
%FunctionSetInstanceClassName(f, 'Error');
%SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
- %SetProperty(f.prototype, "name", name, DONT_ENUM);
+ // The name property on the prototype of error objects is not
+ // specified as being read-only and dont-delete. However, allowing
+ // it to be overwritten can leak error objects between script blocks
+ // in the same context in a browser setting. Therefore we fix the
+ // name.
+ %SetProperty(f.prototype, "name", name,
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetCode(f, function(m) {
if (%_IsConstructCall()) {
// Define all the expected properties directly on the error
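
The stricter attributes are visible from script: with READ_ONLY and DONT_DELETE set, sloppy-mode writes and deletes on Error.prototype.name fail silently. A sketch of the observable behavior under this change:

    Error.prototype.name = "Hacked";           // ignored: READ_ONLY
    console.log(delete Error.prototype.name);  // false: DONT_DELETE
    console.log(new Error("m") + "");          // still "Error: m"
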
@@ -1156,8 +1147,10 @@ function SetUpError() {
return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
});
} else if (!IS_UNDEFINED(m)) {
- %IgnoreAttributesAndSetProperty(
- this, 'message', ToString(m), DONT_ENUM);
+ %IgnoreAttributesAndSetProperty(this,
+ 'message',
+ ToString(m),
+ DONT_ENUM);
}
captureStackTrace(this, f);
} else {
@@ -1187,41 +1180,16 @@ $Error.captureStackTrace = captureStackTrace;
var visited_errors = new InternalArray();
var cyclic_error_marker = new $Object();
-function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
- // Climb the prototype chain until we find the holder.
- while (error && !%HasLocalProperty(error, name)) {
- error = error.__proto__;
- }
- if (error === null) return void 0;
- if (!IS_OBJECT(error)) return error[name];
- // If the property is an accessor on one of the predefined errors that can be
- // generated statically by the compiler, don't touch it. This is to address
- // http://code.google.com/p/chromium/issues/detail?id=69187
- var desc = %GetOwnProperty(error, name);
- if (desc && desc[IS_ACCESSOR_INDEX]) {
- var isName = name === "name";
- if (error === $ReferenceError.prototype)
- return isName ? "ReferenceError" : void 0;
- if (error === $SyntaxError.prototype)
- return isName ? "SyntaxError" : void 0;
- if (error === $TypeError.prototype)
- return isName ? "TypeError" : void 0;
- }
- // Otherwise, read normally.
- return error[name];
-}
-
function ErrorToStringDetectCycle(error) {
if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
try {
- var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
- var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
+ var type = error.type;
+ var name = error.name;
name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
- var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
+ var message = error.message;
var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
if (type && !hasMessage) {
- var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments");
- message = FormatMessage(%NewMessageObject(type, args));
+ message = FormatMessage(%NewMessageObject(type, error.arguments));
}
message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
if (name === "") return message;
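
The composition rules in ErrorToStringDetectCycle (a missing name defaults to "Error"; an empty name or message collapses the result to the other part) can be checked directly. A small sketch, using defineProperty for name since the prototype's name property is now read-only:

    var e = new Error("boom");
    console.log(String(e));   // "Error: boom"
    Object.defineProperty(e, "name", { value: "", configurable: true });
    console.log(String(e));   // "boom" -- empty name prints the message alone
    Object.defineProperty(e, "name", { value: "MyError" });
    e.message = "";           // own property set by the constructor; writable
    console.log(String(e));   // "MyError" -- empty message prints the name alone
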
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 5a2074e652..eeb84c3a94 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -118,7 +118,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -214,8 +214,7 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
+ __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ Assert(
@@ -450,10 +449,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ Branch(call_generic_code);
__ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// a3: JSArray
__ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
a2,
t5,
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index a464348fa7..f3dd95b851 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -5043,7 +5043,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
+ const int kRegExpExecuteArguments = 8;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
@@ -5054,33 +5054,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// allocating space for the c argument slots, we don't need to calculate
// that into the argument positions on the stack. This is how the stack will
// look (sp meaning the value of sp at this moment):
- // [sp + 5] - Argument 9
// [sp + 4] - Argument 8
// [sp + 3] - Argument 7
// [sp + 2] - Argument 6
// [sp + 1] - Argument 5
// [sp + 0] - saved ra
- // Argument 9: Pass current isolate address.
+ // Argument 8: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
__ li(a0, Operand(ExternalReference::isolate_address()));
- __ sw(a0, MemOperand(sp, 5 * kPointerSize));
+ __ sw(a0, MemOperand(sp, 4 * kPointerSize));
- // Argument 8: Indicate that this is a direct call from JavaScript.
+ // Argument 7: Indicate that this is a direct call from JavaScript.
__ li(a0, Operand(1));
- __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+ __ sw(a0, MemOperand(sp, 3 * kPointerSize));
- // Argument 7: Start (high end) of backtracking stack memory area.
+ // Argument 6: Start (high end) of backtracking stack memory area.
__ li(a0, Operand(address_of_regexp_stack_memory_address));
__ lw(a0, MemOperand(a0, 0));
__ li(a2, Operand(address_of_regexp_stack_memory_size));
__ lw(a2, MemOperand(a2, 0));
__ addu(a0, a0, a2);
- __ sw(a0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(a0, zero_reg);
__ sw(a0, MemOperand(sp, 2 * kPointerSize));
// Argument 5: static offsets vector buffer.
@@ -5131,9 +5125,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ Branch(&success, eq, v0, Operand(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
+ __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
Label failure;
__ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
// If not exception it can only be retry. Handle that in the runtime system.
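
Both sides of this hunk preserve the contract visible from JavaScript: exec() on a global regexp returns one match per call, and iteration is driven by lastIndex from script. A quick reference sketch:

    var re = /\d+/g;
    var s = "a1b22c333";
    var m;
    while ((m = re.exec(s)) !== null) {
      console.log(m[0], "at", m.index, "next lastIndex:", re.lastIndex);
    }
    // For patterns that can match the empty string, callers must bump
    // lastIndex themselves to avoid looping forever -- the same rule the
    // removed engine-side zero-length handling encoded.
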
@@ -7370,8 +7362,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
{ REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
{ REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
@@ -7637,9 +7629,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(a2, t1, &double_elements);
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(a0, &smi_element);
- __ CheckFastSmiElements(a2, t1, &fast_elements);
+ __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
// Storing into the array literal requires an elements transition. Call into
// the runtime.
@@ -7651,7 +7643,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(t1, t0);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -7664,8 +7656,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -7674,7 +7666,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
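
The three labels in StoreArrayLiteralElementStub map onto the elements kinds an array literal can carry; from script (assuming smi-only arrays are enabled, i.e. FLAG_smi_only_arrays):

    var smis    = [1, 2, 3];    // FAST_SMI_ONLY_ELEMENTS: smi_element path
    var doubles = [1.5, 2.5];   // FAST_DOUBLE_ELEMENTS:   double_elements path
    var objects = [{}, "s"];    // FAST_ELEMENTS:          fast_elements path
    smis[0] = 1.5;              // smi-only -> double: the runtime transition call
    objects[0] = 1;             // stays FAST_ELEMENTS; kinds never narrow again
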
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 44e0359e44..9acccdc2ca 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -72,7 +72,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
@@ -95,7 +95,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 3be1e4d8b2..83f5f50172 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -116,8 +116,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 3ed794ac59..7be50569cd 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -1711,8 +1711,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements =
- IsFastObjectElementsKind(constant_elements_kind);
+ bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1734,7 +1733,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1763,7 +1763,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
- if (IsFastObjectElementsKind(constant_elements_kind)) {
+ if (constant_elements_kind == FAST_ELEMENTS) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ lw(t2, MemOperand(sp)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 5d530d0e9b..32da2df182 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -1347,35 +1347,34 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&non_double_value, ne, t0, Operand(at));
-
- // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
- // and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
t0,
&slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
t0,
&slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
- // Elements are double, but value is an Object that's not a HeapNumber. Make
- // sure that the receiver is a Array with Object elements and transition array
- // from double elements to Object elements.
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+ // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
@@ -1472,7 +1471,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&fail);
@@ -1689,12 +1688,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(address());
}
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
Address andi_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1728,30 +1727,33 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
- // This is patching a conditional "jump if not smi/jump if smi" site.
- // Enabling by changing from
- // andi at, rx, 0
- // Branch <target>, eq, at, Operand(zero_reg)
- // to:
- // andi at, rx, #kSmiTagMask
- // Branch <target>, ne, at, Operand(zero_reg)
- // and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
- Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
- if (check == ENABLE_INLINED_SMI_CHECK) {
- ASSERT(Assembler::IsAndImmediate(instr_at_patch));
- ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
- patcher.masm()->andi(at, reg, kSmiTagMask);
- } else {
- ASSERT(check == DISABLE_INLINED_SMI_CHECK);
- ASSERT(Assembler::IsAndImmediate(instr_at_patch));
- patcher.masm()->andi(at, reg, 0);
- }
+ ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+ ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::IsBeq(branch_instr)) {
+ // This is patching a "jump if not smi" site to be active.
+ // Changing:
+ // andi at, rx, 0
+ // Branch <target>, eq, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, ne, at, Operand(zero_reg)
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
patcher.ChangeBranchCondition(ne);
} else {
ASSERT(Assembler::IsBne(branch_instr));
+ // This is patching a "jump if smi" site to be active.
+ // Changing:
+ // andi at, rx, 0
+ // Branch <target>, ne, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, eq, at, Operand(zero_reg)
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
patcher.ChangeBranchCondition(eq);
}
}
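
Conceptually the patcher toggles an inlined smi tag test. A smi is a tagged value with the low bit clear, so the enabled check is a single AND with kSmiTagMask; a sketch of that predicate (mask value assumed for a 32-bit target):

    // Enabled form: andi at, rx, kSmiTagMask ; branch on (at == 0) / (at != 0)
    function isSmi(taggedValue) {
      var kSmiTagMask = 1;              // assumed: low tag bit on 32-bit V8
      return (taggedValue & kSmiTagMask) === 0;
    }
    console.log(isSmi(0x8));  // true: the integer 4 stored as a smi (4 << 1)
    console.log(isSmi(0x9));  // false: low bit set marks a heap object pointer
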
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 5c5c8423b4..122fd1ef4c 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -2343,37 +2343,40 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
-
int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- return;
- }
Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- } else {
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ li(a2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
Label next;
__ Branch(&next, ne, scratch, Operand(map));
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ Branch(&done);
__ bind(&next);
}
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ Branch(&generic, ne, scratch, Operand(map));
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ Branch(&done);
+ __ bind(&generic);
+ __ li(a2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
}
- if (need_generic) {
- __ li(a2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
}
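
The map_count chain above is what a polymorphic named load compiles to; the maps accumulate from call sites that see differently shaped receivers, e.g. (names illustrative):

    function getX(o) { return o.x; }   // one load site, several hidden classes
    getX({ x: 1 });                    // map 1: handled by an inline compare
    getX({ x: 1, y: 2 });              // map 2: next compare in the chain
    getX({ y: 2, x: 1 });              // map 3: may fall through to the
                                       // generic LoadIC path emitted above
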
@@ -2448,10 +2451,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ Ext(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
- __ Branch(&fail, lt, scratch,
- Operand(GetInitialFastElementsKind()));
- __ Branch(&done, le, scratch,
- Operand(TERMINAL_FAST_ELEMENTS_KIND));
+ __ Branch(&done, eq, scratch,
+ Operand(FAST_ELEMENTS));
__ Branch(&fail, lt, scratch,
Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ Branch(&done, le, scratch,
@@ -2504,9 +2505,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
__ addu(scratch, elements, scratch);
- uint32_t offset = FixedArray::kHeaderSize +
- (instr->additional_index() << kPointerSizeLog2);
- __ lw(result, FieldMemOperand(scratch, offset));
+ __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2537,21 +2536,17 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
if (key_is_constant) {
- __ Addu(elements, elements,
- Operand(((constant_key + instr->additional_index()) << shift_size) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
__ Addu(elements, elements, Operand(scratch));
__ Addu(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << shift_size)));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
- }
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
__ ldc1(result, MemOperand(elements));
}
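
The kHoleNanUpper32 comparison distinguishes a hole from an ordinary NaN in an unboxed double array: the hole is stored as a NaN with a reserved bit pattern, and a load that sees it must bail out rather than hand script a bogus NaN. From script the hole simply reads back as undefined; a sketch:

    var a = [ , 1.5 ];    // holey double array: element 0 is the hole NaN
    console.log(a[0]);    // undefined -- the load recognized the hole pattern
    console.log(a[1]);    // 1.5
    console.log(0 / 0);   // NaN -- a genuine NaN payload, not the hole
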
@@ -2573,41 +2568,32 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << shift_size);
+ __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ __ lwc1(result, MemOperand(scratch0()));
__ cvt_d_s(result, result);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ __ ldc1(result, MemOperand(scratch0()));
}
} else {
Register result = ToRegister(instr->result());
Register scratch = scratch0();
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ Addu(scratch, key, instr->additional_index());
- }
MemOperand mem_operand(zero_reg);
if (key_is_constant) {
- mem_operand =
- MemOperand(external_pointer,
- (constant_key << shift_size) + additional_offset);
+ mem_operand = MemOperand(external_pointer,
+ constant_key * (1 << shift_size));
} else {
- if (instr->additional_index() == 0) {
- __ sll(scratch, key, shift_size);
- } else {
- __ sll(scratch, scratch, shift_size);
- }
+ __ sll(scratch, key, shift_size);
__ Addu(scratch, scratch, external_pointer);
mem_operand = MemOperand(scratch);
}
@@ -2640,10 +2626,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
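
The EXTERNAL_* kinds dispatched on above back the pre-ES6 typed arrays, where the shift size is the log2 of the element width; for example:

    var doubles = new Float64Array(4); // EXTERNAL_DOUBLE_ELEMENTS, shift 3
    doubles[0] = 3.14;                 // ldc1/sdc1 path, offset = key * 8
    var ints = new Int32Array(4);      // EXTERNAL_INT_ELEMENTS, shift 2
    ints[1] = 42;                      // lw/sw path, offset = key * 4
    console.log(doubles[0], ints[1]);
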
@@ -3525,17 +3508,11 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
- (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
- + FixedArray::kHeaderSize;
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ sw(value, FieldMemOperand(elements, offset));
} else {
__ sll(scratch, key, kPointerSizeLog2);
__ addu(scratch, elements, scratch);
- if (instr->additional_index() != 0) {
- __ Addu(scratch,
- scratch,
- instr->additional_index() << kPointerSizeLog2);
- }
__ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
@@ -3578,7 +3555,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << shift_size) +
+ __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
@@ -3599,7 +3576,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
__ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
+ __ sdc1(value, MemOperand(scratch));
}
@@ -3620,13 +3597,12 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << shift_size);
+ __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
@@ -3634,27 +3610,19 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
+ __ swc1(double_scratch0(), MemOperand(scratch0()));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(scratch0(), additional_offset));
+ __ sdc1(value, MemOperand(scratch0()));
}
} else {
Register value(ToRegister(instr->value()));
- Register scratch = scratch0();
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ Addu(scratch, key, instr->additional_index());
- }
MemOperand mem_operand(zero_reg);
+ Register scratch = scratch0();
if (key_is_constant) {
mem_operand = MemOperand(external_pointer,
- ((constant_key + instr->additional_index())
- << shift_size));
+ constant_key * (1 << shift_size));
} else {
- if (instr->additional_index() == 0) {
- __ sll(scratch, key, shift_size);
- } else {
- __ sll(scratch, scratch, shift_size);
- }
+ __ sll(scratch, key, shift_size);
__ Addu(scratch, scratch, external_pointer);
mem_operand = MemOperand(scratch);
}
@@ -3676,10 +3644,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3717,21 +3682,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
__ li(new_map_reg, Operand(to_map));
- if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kRAHasBeenSaved, kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
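
The three branches differ in cost: a smi-only to object transition only swaps the map (plus a write barrier), while any transition involving doubles must call a builtin to rewrite the backing store. Script-level triggers, for orientation:

    var a = [1, 2, 3];   // FAST_SMI_ONLY_ELEMENTS
    a[0] = {};           // map swap only: the elements array is reused
    var b = [1, 2, 3];
    b[0] = 1.5;          // smi-only -> double: elements unboxed into a new store
    b[1] = "s";          // double -> object: elements boxed back to tagged values
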
@@ -4486,9 +4450,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -4641,11 +4604,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // Deopt if the literal boilerplate ElementsKind is of a type different than
+ // the expected one. The check isn't necessary if the boilerplate has already
+ // been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 49a462ae15..1eb3ab7a0e 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -2023,9 +2023,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index f68e6ca105..a04b42961a 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -1201,7 +1201,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1218,14 +1217,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@@ -1239,7 +1237,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1708,7 +1705,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1731,7 +1727,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -1776,7 +1771,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 6cd5e97f62..e93a4175b3 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -3341,39 +3341,33 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ Operand(Map::kMaximumBitField2FastElementValue));
}
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
}
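
These checks rely on the reverted two-kind ordering (FAST_SMI_ONLY_ELEMENTS == 0, FAST_ELEMENTS == 1) being encoded in ordered bits of Map::kBitField2, so each predicate reduces to range comparisons. The same predicates over bare kind values, as a sketch:

    var FAST_SMI_ONLY_ELEMENTS = 0, FAST_ELEMENTS = 1;
    function isFastElements(kind)        { return kind <= FAST_ELEMENTS; }
    function isFastObjectElements(kind)  { return kind > FAST_SMI_ONLY_ELEMENTS &&
                                                  kind <= FAST_ELEMENTS; }
    function isFastSmiOnlyElements(kind) { return kind <= FAST_SMI_ONLY_ELEMENTS; }
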
@@ -3475,17 +3469,22 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
Operand right = Operand(map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
- if (!current_map) break;
- Branch(early_success, eq, scratch, right);
- right = Operand(Handle<Map>(current_map));
- }
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ Branch(early_success, eq, scratch, right);
+ right = Operand(Handle<Map>(transitioned_fast_element_map));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ Branch(early_success, eq, scratch, right);
+ right = Operand(Handle<Map>(transitioned_double_map));
}
}
@@ -4444,37 +4443,27 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
- lw(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- Branch(no_map_match, ne, map_in_out, Operand(scratch));
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(map_in_out, FieldMemOperand(scratch, offset));
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
+ Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
lw(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
map_out,
scratch,
&done);
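
LoadTransitionedArrayMapConditional consults a per-native-context cache of initial JSArray maps keyed by elements kind; a model of that lookup (slot keys and map labels illustrative only):

    var FAST_SMI_ONLY_ELEMENTS = 0, FAST_ELEMENTS = 1, FAST_DOUBLE_ELEMENTS = 2;
    var contextMaps = {};                       // elements kind -> cached map
    contextMaps[FAST_SMI_ONLY_ELEMENTS] = "smi-only array map";
    contextMaps[FAST_ELEMENTS]          = "object array map";
    contextMaps[FAST_DOUBLE_ELEMENTS]   = "double array map";
    function loadTransitionedMap(mapInOut, expectedKind, transitionedKind) {
      if (mapInOut !== contextMaps[expectedKind]) return null; // no_map_match
      return contextMaps[transitionedKind];     // use the transitioned map
    }
    console.log(loadTransitionedMap("smi-only array map",
                                    FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS));
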
@@ -5389,7 +5378,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 1766866ee8..f57418f386 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -819,8 +819,7 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out,
- bool can_have_holes);
+ Register map_out);
void LoadGlobalFunction(int index, Register function);
@@ -962,9 +961,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
+ void CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 8ea390e1f4..c48bcc497e 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -43,49 +43,44 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
- * - t7 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
* - t1 : Pointer to current code object (Code*) including heap object tag.
* - t2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - t3 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - t4 : Points to tip of backtrack stack
+ * - t4 : points to tip of backtrack stack
* - t5 : Unused.
* - t6 : End of input (points to byte after last character in input).
* - fp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
- * - sp : Points to tip of C stack.
+ * - sp : points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
*
- * - fp[64] Isolate* isolate (address of the current isolate)
- * - fp[60] direct_call (if 1, direct call from JavaScript code,
+ * - fp[56] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[56] stack_area_base (High end of the memory area to use as
+ * - fp[52] stack_area_base (High end of the memory area to use as
* backtracking stack).
- * - fp[52] capture array size (may fit multiple sets of matches)
* - fp[48] int* capture_array (int[num_saved_registers_], for output).
* - fp[44] secondary link/return address used by native call.
* --- sp when called ---
- * - fp[40] return address (lr).
- * - fp[36] old frame pointer (r11).
+ * - fp[40] return address (lr).
+ * - fp[36] old frame pointer (r11).
* - fp[0..32] backup of registers s0..s7.
* --- frame pointer ----
- * - fp[-4] end of input (address of end of string).
- * - fp[-8] start of input (address of first character in string).
+ * - fp[-4] end of input (Address of end of string).
+ * - fp[-8] start of input (Address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] success counter (only for global regexps to count matches).
- * - fp[-24] Offset of location before start of input (effectively character
+ * - fp[-20] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
- * - fp[-28] At start (if 1, we are starting at the start of the
+ * - fp[-24] At start (if 1, we are starting at the start of the
* string, otherwise 0)
- * - fp[-32] register 0 (Only positions must be stored in the first
+ * - fp[-28] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -206,8 +201,8 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ lw(a1, MemOperand(frame_pointer(), kInputStart));
@@ -219,8 +214,8 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ lw(a1, MemOperand(frame_pointer(), kInputStart));
__ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
@@ -645,7 +640,6 @@ void RegExpMacroAssemblerMIPS::Fail() {
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
- Label return_v0;
if (masm_->has_exception()) {
// If the code gets corrupted due to long regular expressions and lack of
// space on trampolines, an internal exception flag is set. If this case
@@ -675,9 +669,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(a0, zero_reg);
- __ push(a0); // Make room for success counter and initialize it to 0.
__ push(a0); // Make room for "position - 1" constant (value irrelevant).
+ __ push(a0); // Make room for "at start" constant (value irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -696,12 +689,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ li(v0, Operand(EXCEPTION));
- __ jmp(&return_v0);
+ __ jmp(&exit_label_);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(a0);
// If returned value is non-zero, we exit with the returned value as result.
- __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+ __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
__ bind(&stack_ok);
// Allocate space on stack for registers.
@@ -722,44 +715,39 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// position registers.
__ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Initialize code pointer register
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
- __ li(current_character(), Operand('\n'));
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ // Determine whether the start index is zero, that is, at the start of the
+ // string, and store that value in a local variable.
+ __ mov(t5, a1);
+ __ li(a1, Operand(1));
+ __ Movn(a1, zero_reg, t5);
+ __ sw(a1, MemOperand(frame_pointer(), kAtStart));
- // Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1.
- if (num_saved_registers_ > 8) {
- // Address of register 0.
- __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
- __ li(a2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ sw(a0, MemOperand(a1));
- __ Addu(a1, a1, Operand(-kPointerSize));
- __ Subu(a2, a2, Operand(1));
- __ Branch(&init_loop, ne, a2, Operand(zero_reg));
- } else {
- for (int i = 0; i < num_saved_registers_; i++) {
- __ sw(a0, register_location(i));
- }
- }
+
+ // Address of register 0.
+ __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ sw(a0, MemOperand(a1));
+ __ Addu(a1, a1, Operand(-kPointerSize));
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
}
// Initialize backtrack stack pointer.
__ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
+ // Initialize code pointer register
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ // Load previous char as initial value of current character register.
+ Label at_start;
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ __ Branch(&at_start, ne, a0, Operand(zero_reg));
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
+ __ jmp(&start_label_);
+ __ bind(&at_start);
+ __ li(current_character(), Operand('\n'));
__ jmp(&start_label_);
@@ -788,10 +776,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ lw(a2, register_location(i));
__ lw(a3, register_location(i + 1));
- if (global()) {
- // Keep capture start in a4 for the zero-length check later.
- __ mov(t7, a2);
- }
if (mode_ == UC16) {
__ sra(a2, a2, 1);
__ Addu(a2, a2, a1);
@@ -807,52 +791,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Addu(a0, a0, kPointerSize);
}
}
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- __ lw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ lw(a2, MemOperand(frame_pointer(), kRegisterOutput));
- // Increment success counter.
- __ Addu(a0, a0, 1);
- __ sw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ Subu(a1, a1, num_saved_registers_);
- // Check whether we have enough room for another set of capture results.
- __ mov(v0, a0);
- __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
-
- __ sw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
- // Advance the location for output.
- __ Addu(a2, a2, num_saved_registers_ * kPointerSize);
- __ sw(a2, MemOperand(frame_pointer(), kRegisterOutput));
-
- // Prepare a0 to initialize registers with its value in the next run.
- __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Special case for zero-length matches.
- // t7: capture start index
- // Not a zero-length match, restart.
- __ Branch(
- &load_char_start_regexp, ne, current_input_offset(), Operand(t7));
- // Offset from the end is zero if we already reached the end.
- __ Branch(&exit_label_, eq, current_input_offset(), Operand(zero_reg));
- // Advance current position after a zero-length match.
- __ Addu(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- __ Branch(&load_char_start_regexp);
- } else {
- __ li(v0, Operand(SUCCESS));
- }
+ __ li(v0, Operand(SUCCESS));
}
// Exit and return v0.
__ bind(&exit_label_);
- if (global()) {
- __ lw(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- }
-
- __ bind(&return_v0);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
@@ -878,7 +820,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ MultiPop(regexp_registers_to_retain);
// If returning non-zero, we should end execution with the given
// result as return value.
- __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+ __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -922,7 +864,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ li(v0, Operand(EXCEPTION));
- __ jmp(&return_v0);
+ __ jmp(&exit_label_);
}
}
@@ -1070,9 +1012,8 @@ void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
}
-bool RegExpMacroAssemblerMIPS::Succeed() {
+void RegExpMacroAssemblerMIPS::Succeed() {
__ jmp(&success_label_);
- return global();
}
@@ -1339,9 +1280,8 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
- // t7 is not being used to store the capture start index at this point.
- __ Addu(t7, current_input_offset(), Operand(cp_offset * char_size()));
- offset = t7;
+ __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = a0;
}
// We assume that we cannot do unaligned loads on MIPS, so this function
// must only be used to load a single character at a time.
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
index 562d3fcd76..d167f624b6 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -115,7 +115,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
+ virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@@ -141,8 +141,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kStackFrameHeader = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 20;
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -154,10 +153,10 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kInputStartMinusOne = kInputString - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
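
The offsets above chain by kPointerSize, so the layout can be checked with a few lines of arithmetic (kPointerSize = 4 on MIPS32; the fp[48] base for kRegisterOutput is taken from the frame-layout comment and is an assumption here):

    var kPointerSize = 4;                                 // MIPS32
    var kRegisterOutput = 48;                             // assumed: fp[48]
    var kStackHighEnd = kRegisterOutput + kPointerSize;   // fp[52]
    var kDirectCall   = kStackHighEnd + kPointerSize;     // fp[56]
    var kIsolate      = kDirectCall + kPointerSize;       // fp[60]
    // Locals grow downward from the frame pointer:
    var kInputString        = -16;                        // fp[-16]
    var kInputStartMinusOne = kInputString - kPointerSize;        // fp[-20]
    var kAtStart            = kInputStartMinusOne - kPointerSize; // fp[-24]
    var kRegisterZero       = kAtStart - kPointerSize;            // fp[-28]
    console.log(kIsolate === 60, kRegisterZero === -28);  // true true
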
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 776badc29b..1e72939876 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -50,16 +50,16 @@ namespace internal {
entry(p0, p1, p2, p3, p4)
typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, int, Address, int, Isolate*);
+ void*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type mips_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
(FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -403,9 +403,9 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+ entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
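
Both the native and the simulator variants of CALL_GENERATED_REGEXP_CODE have to track the mips_regexp_matcher typedef exactly, because the call goes through a raw function-pointer cast that nothing type-checks. A stripped-down sketch of the idiom with toy types (not the real signature; the cast is implementation-defined, as with V8's FUNCTION_CAST):

    #include <cstdio>

    // Toy stand-in for the generated-code entry point.
    typedef int (*regexp_matcher)(const char* subject, int index, void* dummy);

    static int FakeGeneratedCode(const char* subject, int index, void*) {
      return subject[index] != '\0';  // pretend match result
    }

    // The macro arity must match the typedef above; once the address is
    // cast, the compiler can no longer catch a mismatched argument list.
    #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1) \
      (reinterpret_cast<regexp_matcher>(entry)(p0, p1, NULL))

    int main() {
      void* entry = reinterpret_cast<void*>(&FakeGeneratedCode);
      printf("result=%d\n", CALL_GENERATED_REGEXP_CODE(entry, "abc", 1));
      return 0;
    }
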
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index f8cf9704b1..676b523e23 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -1287,19 +1287,12 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code,
- // the FIELD case might cause a miss during the prototype check.
- bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
-
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
__ Push(receiver, holder_reg, name_reg);
} else {
__ Push(holder_reg, name_reg);
@@ -1323,14 +1316,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
__ bind(&interceptor_failed);
__ pop(name_reg);
__ pop(holder_reg);
- if (must_preserve_receiver_reg) {
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}
// Leave the internal frame.
}
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
- if (must_perfrom_prototype_check) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
Handle<JSObject>(lookup->holder()),
@@ -1585,29 +1578,16 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiElements(a3, t3, &call_builtin);
+ __ CheckFastSmiOnlyElements(a3, t3, &call_builtin);
// edx: receiver
// r3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
a3,
t3,
- &try_holey_map);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- a3,
- t3,
&call_builtin);
__ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(a3, a3, &call_builtin);
@@ -3385,12 +3365,9 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3524,11 +3501,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3888,11 +3862,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3956,11 +3927,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4131,11 +4099,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4314,7 +4279,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
@@ -4342,7 +4307,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ Addu(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4351,7 +4316,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ Addu(scratch, scratch, scratch2);
__ sw(value_reg, MemOperand(scratch));
} else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
+ ASSERT(elements_kind == FAST_ELEMENTS);
__ Addu(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4360,6 +4325,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ Addu(scratch, scratch, scratch2);
__ sw(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
+ ASSERT(elements_kind == FAST_ELEMENTS);
__ RecordWrite(elements_reg, // Object.
scratch, // Address.
receiver_reg, // Value.
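
In the store stub above, only the object-elements path emits RecordWrite: the smi-only path proves statically that no heap pointer is ever written. A hedged sketch of that decision in plain C++, with an illustrative tagging scheme standing in for V8's:

    #include <cstdint>
    #include <cstdio>

    // Illustrative tagging: even words are smis, odd words are heap pointers.
    static inline intptr_t SmiTag(intptr_t value) { return value << 1; }
    static inline bool IsSmi(intptr_t word) { return (word & 1) == 0; }

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

    // Stand-in for the GC write barrier the stub emits via RecordWrite.
    static void RecordWrite(intptr_t* slot) {
      printf("write barrier for slot %p\n", static_cast<void*>(slot));
    }

    static void StoreElement(ElementsKind kind, intptr_t* slot,
                             intptr_t value) {
      *slot = value;
      // Smis are not pointers, so the smi-only path never needs the barrier;
      // the object path must assume a heap pointer may have been written.
      if (kind == FAST_ELEMENTS && !IsSmi(value)) RecordWrite(slot);
    }

    int main() {
      intptr_t backing[1] = {0};
      StoreElement(FAST_SMI_ONLY_ELEMENTS, &backing[0], SmiTag(42));
      StoreElement(FAST_ELEMENTS, &backing[0], 0x1001);  // fake heap pointer
      return 0;
    }
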
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 11a3df995b..cd2ccf81c7 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -286,11 +286,12 @@ void JSObject::JSObjectVerify() {
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
}
- ASSERT_EQ((map()->has_fast_smi_or_object_elements() ||
+ ASSERT_EQ((map()->has_fast_elements() ||
+ map()->has_fast_smi_only_elements() ||
(elements() == GetHeap()->empty_fixed_array())),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
- ASSERT(map()->has_fast_object_elements() == HasFastObjectElements());
+ ASSERT(map()->has_fast_elements() == HasFastElements());
}
@@ -302,8 +303,6 @@ void Map::MapVerify() {
instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
- SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
- SLOW_ASSERT(instance_descriptors()->IsConsistentWithBackPointers(this));
}
@@ -457,17 +456,10 @@ void String::StringVerify() {
ConsString::cast(this)->ConsStringVerify();
} else if (IsSlicedString()) {
SlicedString::cast(this)->SlicedStringVerify();
- } else if (IsSeqAsciiString()) {
- SeqAsciiString::cast(this)->SeqAsciiStringVerify();
}
}
-void SeqAsciiString::SeqAsciiStringVerify() {
- CHECK(String::IsAscii(GetChars(), length()));
-}
-
-
void ConsString::ConsStringVerify() {
CHECK(this->first()->IsString());
CHECK(this->second() == GetHeap()->empty_string() ||
@@ -516,7 +508,7 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
- CHECK(HasFastObjectElements());
+ CHECK(HasFastElements());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@@ -811,11 +803,6 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
}
// Indexed properties
switch (GetElementsKind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
int holes = 0;
@@ -829,14 +816,6 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_unused_elements_ += holes;
break;
}
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
ExternalPixelArray* e = ExternalPixelArray::cast(elements());
@@ -850,7 +829,8 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
dict->Capacity() - dict->NumberOfElements();
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ default:
+ UNREACHABLE();
break;
}
}
@@ -914,61 +894,6 @@ bool DescriptorArray::IsSortedNoDuplicates() {
}
-static bool CheckOneBackPointer(Map* current_map, Object* target) {
- return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
-}
-
-
-bool DescriptorArray::IsConsistentWithBackPointers(Map* current_map) {
- for (int i = 0; i < number_of_descriptors(); ++i) {
- switch (GetType(i)) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- if (!CheckOneBackPointer(current_map, GetValue(i))) {
- return false;
- }
- break;
- case ELEMENTS_TRANSITION: {
- Object* object = GetValue(i);
- if (!CheckOneBackPointer(current_map, object)) {
- return false;
- }
- if (object->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(object);
- for (int i = 0; i < array->length(); ++i) {
- if (!CheckOneBackPointer(current_map, array->get(i))) {
- return false;
- }
- }
- }
- break;
- }
- case CALLBACKS: {
- Object* object = GetValue(i);
- if (object->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(object);
- if (!CheckOneBackPointer(current_map, accessors->getter())) {
- return false;
- }
- if (!CheckOneBackPointer(current_map, accessors->setter())) {
- return false;
- }
- }
- break;
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- break;
- }
- }
- return true;
-}
-
-
void JSFunctionResultCache::JSFunctionResultCacheVerify() {
JSFunction::cast(get(kFactoryIndex))->Verify();
@@ -1010,28 +935,6 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
}
-void Map::ZapInstanceDescriptors() {
- DescriptorArray* descriptors = instance_descriptors();
- if (descriptors == GetHeap()->empty_descriptor_array()) return;
- FixedArray* contents = FixedArray::cast(
- descriptors->get(DescriptorArray::kContentArrayIndex));
- MemsetPointer(descriptors->data_start(),
- GetHeap()->the_hole_value(),
- descriptors->length());
- MemsetPointer(contents->data_start(),
- GetHeap()->the_hole_value(),
- contents->length());
-}
-
-
-void Map::ZapPrototypeTransitions() {
- FixedArray* proto_transitions = prototype_transitions();
- MemsetPointer(proto_transitions->data_start(),
- GetHeap()->the_hole_value(),
- proto_transitions->length());
-}
-
-
#endif // DEBUG
} } // namespace v8::internal
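
The deleted IsConsistentWithBackPointers checker enforced a single invariant: every transition target must point back at the map that owns the transition. A self-contained sketch of that invariant over a toy transition tree, where Map is a plain struct rather than V8's heap object:

    #include <cassert>
    #include <vector>

    struct Map {
      Map* back_pointer = nullptr;
      std::vector<Map*> transitions;  // maps reachable from this one
    };

    // Mirrors CheckOneBackPointer: a target is consistent only if it
    // points back at the map it was reached from.
    static bool CheckOneBackPointer(const Map* current, const Map* target) {
      return target == nullptr || target->back_pointer == current;
    }

    static bool IsConsistent(const Map* current) {
      for (const Map* target : current->transitions) {
        if (!CheckOneBackPointer(current, target)) return false;
      }
      return true;
    }

    int main() {
      Map root, child;
      root.transitions.push_back(&child);
      child.back_pointer = &root;
      assert(IsConsistent(&root));
      return 0;
    }
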
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 4afbe3edd7..459420c5d3 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -128,6 +128,18 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ if (to_kind == FAST_ELEMENTS) {
+ return from_kind == FAST_SMI_ONLY_ELEMENTS ||
+ from_kind == FAST_DOUBLE_ELEMENTS;
+ } else {
+ return to_kind == FAST_DOUBLE_ELEMENTS &&
+ from_kind == FAST_SMI_ONLY_ELEMENTS;
+ }
+}
+
+
bool Object::IsFixedArrayBase() {
return IsFixedArray() || IsFixedDoubleArray();
}
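
The restored predicate encodes the entire pre-holey lattice: FAST_SMI_ONLY_ELEMENTS may generalize to FAST_DOUBLE_ELEMENTS, and both may generalize to FAST_ELEMENTS. The same ordering as a compilable sketch (enum values are illustrative):

    #include <cassert>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS,
      FAST_DOUBLE_ELEMENTS,
      FAST_ELEMENTS
    };

    // FAST_ELEMENTS is reachable from both other kinds;
    // FAST_DOUBLE_ELEMENTS only from smi-only arrays.
    static bool IsMoreGeneralElementsKindTransition(ElementsKind from,
                                                    ElementsKind to) {
      if (to == FAST_ELEMENTS) {
        return from == FAST_SMI_ONLY_ELEMENTS || from == FAST_DOUBLE_ELEMENTS;
      }
      return to == FAST_DOUBLE_ELEMENTS && from == FAST_SMI_ONLY_ELEMENTS;
    }

    int main() {
      assert(IsMoreGeneralElementsKindTransition(FAST_SMI_ONLY_ELEMENTS,
                                                 FAST_DOUBLE_ELEMENTS));
      assert(!IsMoreGeneralElementsKindTransition(FAST_ELEMENTS,
                                                  FAST_DOUBLE_ELEMENTS));
      return 0;
    }
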
@@ -1232,26 +1244,35 @@ FixedArrayBase* JSObject::elements() {
return static_cast<FixedArrayBase*>(array);
}
-
-void JSObject::ValidateElements() {
+void JSObject::ValidateSmiOnlyElements() {
#if DEBUG
- if (FLAG_enable_slow_asserts) {
- ElementsAccessor* accessor = GetElementsAccessor();
- accessor->Validate(this);
+ if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+ Heap* heap = GetHeap();
+ // Don't use elements, since integrity checks will fail if there
+ // are filler pointers in the array.
+ FixedArray* fixed_array =
+ reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
+ Map* map = fixed_array->map();
+ // Arrays that have been shifted in place can't be verified.
+ if (map != heap->raw_unchecked_one_pointer_filler_map() &&
+ map != heap->raw_unchecked_two_pointer_filler_map() &&
+ map != heap->free_space_map()) {
+ for (int i = 0; i < fixed_array->length(); i++) {
+ Object* current = fixed_array->get(i);
+ ASSERT(current->IsSmi() || current->IsTheHole());
+ }
+ }
}
#endif
}
MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
- ValidateElements();
- ElementsKind elements_kind = map()->elements_kind();
- if (!IsFastObjectElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- return TransitionElementsKind(FAST_HOLEY_ELEMENTS);
- } else {
- return TransitionElementsKind(FAST_ELEMENTS);
- }
+#if DEBUG
+ ValidateSmiOnlyElements();
+#endif
+ if ((map()->elements_kind() != FAST_ELEMENTS)) {
+ return TransitionElementsKind(FAST_ELEMENTS);
}
return this;
}
@@ -1263,34 +1284,20 @@ MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
ElementsKind current_kind = map()->elements_kind();
ElementsKind target_kind = current_kind;
ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
- bool is_holey = IsFastHoleyElementsKind(current_kind);
- if (current_kind == FAST_HOLEY_ELEMENTS) return this;
+ if (current_kind == FAST_ELEMENTS) return this;
+
Heap* heap = GetHeap();
Object* the_hole = heap->the_hole_value();
Object* heap_number_map = heap->heap_number_map();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
- if (current == the_hole) {
- is_holey = true;
- target_kind = GetHoleyElementsKind(target_kind);
- } else if (!current->IsSmi()) {
+ if (!current->IsSmi() && current != the_hole) {
if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
- HeapObject::cast(current)->map() == heap_number_map &&
- IsFastSmiElementsKind(target_kind)) {
- if (is_holey) {
- target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_DOUBLE_ELEMENTS;
- }
+ HeapObject::cast(current)->map() == heap_number_map) {
+ target_kind = FAST_DOUBLE_ELEMENTS;
} else {
- if (!current->IsNumber()) {
- if (is_holey) {
- target_kind = FAST_HOLEY_ELEMENTS;
- break;
- } else {
- target_kind = FAST_ELEMENTS;
- }
- }
+ target_kind = FAST_ELEMENTS;
+ break;
}
}
}
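
The restored scan promotes the target kind at most twice: a heap number lifts a smi-only array to doubles when conversion is allowed, and any other non-smi, non-hole value forces FAST_ELEMENTS and ends the scan. A toy model of that loop (the value tags are illustrative, not V8 objects):

    #include <cassert>
    #include <vector>

    enum ElementsKind { SMI_ONLY, DOUBLE, OBJECT };
    enum ValueType { SMI, HOLE, HEAP_NUMBER, OTHER_HEAP_OBJECT };

    // Holes and smis never force a transition; heap numbers bump the kind
    // to DOUBLE; anything else is terminal, so the scan stops early.
    static ElementsKind TargetKind(const std::vector<ValueType>& values,
                                   bool allow_converted_doubles) {
      ElementsKind target = SMI_ONLY;
      for (ValueType v : values) {
        if (v == SMI || v == HOLE) continue;
        if (v == HEAP_NUMBER && allow_converted_doubles) {
          target = DOUBLE;
        } else {
          return OBJECT;  // most general fast kind
        }
      }
      return target;
    }

    int main() {
      assert(TargetKind({SMI, HOLE, SMI}, true) == SMI_ONLY);
      assert(TargetKind({SMI, HEAP_NUMBER}, true) == DOUBLE);
      assert(TargetKind({SMI, OTHER_HEAP_OBJECT}, true) == OBJECT);
      return 0;
    }
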
@@ -1303,7 +1310,6 @@ MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
- uint32_t length,
EnsureElementsMode mode) {
if (elements->map() != GetHeap()->fixed_double_array_map()) {
ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
@@ -1312,19 +1318,11 @@ MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
- return EnsureCanContainElements(objects, length, mode);
+ return EnsureCanContainElements(objects, elements->length(), mode);
}
ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
- } else if (GetElementsKind() == FAST_SMI_ELEMENTS) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(elements);
- for (uint32_t i = 0; i < length; ++i) {
- if (double_array->is_the_hole(i)) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
- }
- }
+ if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
}
@@ -1336,20 +1334,21 @@ MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
ElementsKind to_kind) {
Map* current_map = map();
ElementsKind from_kind = current_map->elements_kind();
+
if (from_kind == to_kind) return current_map;
Context* global_context = isolate->context()->global_context();
- Object* maybe_array_maps = global_context->js_array_maps();
- if (maybe_array_maps->IsFixedArray()) {
- FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
- if (array_maps->get(from_kind) == current_map) {
- Object* maybe_transitioned_map = array_maps->get(to_kind);
- if (maybe_transitioned_map->IsMap()) {
- return Map::cast(maybe_transitioned_map);
+ if (current_map == global_context->smi_js_array_map()) {
+ if (to_kind == FAST_ELEMENTS) {
+ return global_context->object_js_array_map();
+ } else {
+ if (to_kind == FAST_DOUBLE_ELEMENTS) {
+ return global_context->double_js_array_map();
+ } else {
+ ASSERT(to_kind == DICTIONARY_ELEMENTS);
}
}
}
-
return GetElementsTransitionMapSlow(to_kind);
}
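
After the revert, the fast path consults three dedicated global-context slots rather than a per-kind array of cached maps. A sketch of that three-way lookup, where GlobalContext and Map are toy structs:

    #include <cstdio>

    struct Map { const char* name; };

    // Toy stand-ins for the three global-context slots used above.
    struct GlobalContext {
      Map smi_js_array_map{"smi"};
      Map double_js_array_map{"double"};
      Map object_js_array_map{"object"};
    };

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS,
                        FAST_ELEMENTS };

    // Only arrays still on the canonical smi map hit the cache;
    // everything else falls through to the slow lookup.
    static Map* GetElementsTransitionMap(GlobalContext* cx, Map* current,
                                         ElementsKind to_kind) {
      if (current == &cx->smi_js_array_map) {
        if (to_kind == FAST_ELEMENTS) return &cx->object_js_array_map;
        if (to_kind == FAST_DOUBLE_ELEMENTS) return &cx->double_js_array_map;
      }
      return nullptr;  // caller takes the slow path
    }

    int main() {
      GlobalContext cx;
      Map* m = GetElementsTransitionMap(&cx, &cx.smi_js_array_map,
                                        FAST_ELEMENTS);
      printf("%s\n", m ? m->name : "slow path");
      return 0;
    }
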
@@ -1358,6 +1357,9 @@ void JSObject::set_map_and_elements(Map* new_map,
FixedArrayBase* value,
WriteBarrierMode mode) {
ASSERT(value->HasValidElements());
+#ifdef DEBUG
+ ValidateSmiOnlyElements();
+#endif
if (new_map != NULL) {
if (mode == UPDATE_WRITE_BARRIER) {
set_map(new_map);
@@ -1366,7 +1368,8 @@ void JSObject::set_map_and_elements(Map* new_map,
set_map_no_write_barrier(new_map);
}
}
- ASSERT((map()->has_fast_smi_or_object_elements() ||
+ ASSERT((map()->has_fast_elements() ||
+ map()->has_fast_smi_only_elements() ||
(value == GetHeap()->empty_fixed_array())) ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
@@ -1389,7 +1392,8 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() {
- ASSERT(map()->has_fast_smi_or_object_elements() ||
+ ASSERT(map()->has_fast_elements() ||
+ map()->has_fast_smi_only_elements() ||
map()->has_fast_double_elements());
ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
@@ -1398,10 +1402,9 @@ void JSObject::initialize_elements() {
MaybeObject* JSObject::ResetElements() {
Object* obj;
- ElementsKind elements_kind = GetInitialFastElementsKind();
- if (!FLAG_smi_only_arrays) {
- elements_kind = FastSmiToObjectElementsKind(elements_kind);
- }
+ ElementsKind elements_kind = FLAG_smi_only_arrays
+ ? FAST_SMI_ONLY_ELEMENTS
+ : FAST_ELEMENTS;
MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
elements_kind);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -1673,11 +1676,6 @@ Object* FixedArray::get(int index) {
}
-bool FixedArray::is_the_hole(int index) {
- return get(index) == GetHeap()->the_hole_value();
-}
-
-
void FixedArray::set(int index, Smi* value) {
ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
@@ -2859,15 +2857,15 @@ bool Map::has_non_instance_prototype() {
void Map::set_function_with_prototype(bool value) {
if (value) {
- set_bit_field3(bit_field3() | (1 << kFunctionWithPrototype));
+ set_bit_field2(bit_field2() | (1 << kFunctionWithPrototype));
} else {
- set_bit_field3(bit_field3() & ~(1 << kFunctionWithPrototype));
+ set_bit_field2(bit_field2() & ~(1 << kFunctionWithPrototype));
}
}
bool Map::function_with_prototype() {
- return ((1 << kFunctionWithPrototype) & bit_field3()) != 0;
+ return ((1 << kFunctionWithPrototype) & bit_field2()) != 0;
}
@@ -3195,18 +3193,6 @@ void Code::set_compare_state(byte value) {
}
-byte Code::compare_operation() {
- ASSERT(is_compare_ic_stub());
- return READ_BYTE_FIELD(this, kCompareOperationOffset);
-}
-
-
-void Code::set_compare_operation(byte value) {
- ASSERT(is_compare_ic_stub());
- WRITE_BYTE_FIELD(this, kCompareOperationOffset, value);
-}
-
-
byte Code::to_boolean_state() {
ASSERT(is_to_boolean_ic_stub());
return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
@@ -3353,9 +3339,6 @@ void Map::clear_instance_descriptors() {
Object* object = READ_FIELD(this,
kInstanceDescriptorsOrBitField3Offset);
if (!object->IsSmi()) {
-#ifdef DEBUG
- ZapInstanceDescriptors();
-#endif
WRITE_FIELD(
this,
kInstanceDescriptorsOrBitField3Offset,
@@ -3381,11 +3364,6 @@ void Map::set_instance_descriptors(DescriptorArray* value,
}
}
ASSERT(!is_shared());
-#ifdef DEBUG
- if (value != instance_descriptors()) {
- ZapInstanceDescriptors();
- }
-#endif
WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
CONDITIONAL_WRITE_BARRIER(
heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
@@ -3417,71 +3395,14 @@ void Map::set_bit_field3(int value) {
}
-Object* Map::GetBackPointer() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- return FixedArray::cast(object)->get(kProtoTransitionBackPointerOffset);
- } else {
- return object;
- }
-}
-
-
-void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
- Heap* heap = GetHeap();
- ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
- (value->IsMap() && GetBackPointer()->IsUndefined()));
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- FixedArray::cast(object)->set(
- kProtoTransitionBackPointerOffset, value, mode);
- } else {
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
- }
-}
-
-
-FixedArray* Map::prototype_transitions() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- return FixedArray::cast(object);
- } else {
- return GetHeap()->empty_fixed_array();
- }
-}
-
-
-void Map::set_prototype_transitions(FixedArray* value, WriteBarrierMode mode) {
- Heap* heap = GetHeap();
- ASSERT(value != heap->empty_fixed_array());
- value->set(kProtoTransitionBackPointerOffset, GetBackPointer());
-#ifdef DEBUG
- if (value != prototype_transitions()) {
- ZapPrototypeTransitions();
- }
-#endif
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
-}
-
-
-void Map::init_prototype_transitions(Object* undefined) {
- ASSERT(undefined->IsUndefined());
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, undefined);
-}
-
-
-HeapObject* Map::unchecked_prototype_transitions() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- return reinterpret_cast<HeapObject*>(object);
+FixedArray* Map::unchecked_prototype_transitions() {
+ return reinterpret_cast<FixedArray*>(
+ READ_FIELD(this, kPrototypeTransitionsOffset));
}
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
@@ -3739,12 +3660,6 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
}
-int SharedFunctionInfo::profiler_ticks() {
- if (code()->kind() != Code::FUNCTION) return 0;
- return code()->profiler_ticks();
-}
-
-
LanguageMode SharedFunctionInfo::language_mode() {
int hints = compiler_hints();
if (BooleanBit::get(hints, kExtendedModeFunction)) {
@@ -4010,32 +3925,27 @@ MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
global_context->get(Context::ARRAY_FUNCTION_INDEX);
if (array_function->IsJSFunction() &&
this == JSFunction::cast(array_function)) {
- // Replace all of the cached initial array maps in the global context with
- // the appropriate transitioned elements kind maps.
- Heap* heap = GetHeap();
- MaybeObject* maybe_maps =
- heap->AllocateFixedArrayWithHoles(kElementsKindCount);
- FixedArray* maps;
- if (!maybe_maps->To(&maps)) return maybe_maps;
-
- Map* current_map = initial_map;
- ElementsKind kind = current_map->elements_kind();
- ASSERT(kind == GetInitialFastElementsKind());
- maps->set(kind, current_map);
- for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
- i < kFastElementsKindCount; ++i) {
- ElementsKind transitioned_kind = GetFastElementsKindFromSequenceIndex(i);
- MaybeObject* maybe_new_map = current_map->CopyDropTransitions();
- Map* new_map = NULL;
- if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
- new_map->set_elements_kind(transitioned_kind);
- maybe_new_map = current_map->AddElementsTransition(transitioned_kind,
- new_map);
- if (maybe_new_map->IsFailure()) return maybe_new_map;
- maps->set(transitioned_kind, new_map);
- current_map = new_map;
- }
- global_context->set_js_array_maps(maps);
+ ASSERT(initial_map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+
+ MaybeObject* maybe_map = initial_map->CopyDropTransitions();
+ Map* new_double_map = NULL;
+ if (!maybe_map->To<Map>(&new_double_map)) return maybe_map;
+ new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+ maybe_map = initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS,
+ new_double_map);
+ if (maybe_map->IsFailure()) return maybe_map;
+
+ maybe_map = new_double_map->CopyDropTransitions();
+ Map* new_object_map = NULL;
+ if (!maybe_map->To<Map>(&new_object_map)) return maybe_map;
+ new_object_map->set_elements_kind(FAST_ELEMENTS);
+ maybe_map = new_double_map->AddElementsTransition(FAST_ELEMENTS,
+ new_object_map);
+ if (maybe_map->IsFailure()) return maybe_map;
+
+ global_context->set_smi_js_array_map(initial_map);
+ global_context->set_double_js_array_map(new_double_map);
+ global_context->set_object_js_array_map(new_object_map);
}
set_initial_map(initial_map);
return this;
@@ -4371,18 +4281,18 @@ ElementsKind JSObject::GetElementsKind() {
FixedArrayBase* fixed_array =
reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
Map* map = fixed_array->map();
- ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
- (map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(kind) &&
- (fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetHeap()->empty_fixed_array())) ||
- (kind == DICTIONARY_ELEMENTS &&
+ ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
+ (map == GetHeap()->fixed_array_map() ||
+ map == GetHeap()->fixed_cow_array_map())) ||
+ (kind == FAST_DOUBLE_ELEMENTS &&
+ (fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetHeap()->empty_fixed_array())) ||
+ (kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
- fixed_array->IsDictionary()) ||
- (kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
+ fixed_array->IsDictionary()) ||
+ (kind > DICTIONARY_ELEMENTS));
+ ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ (elements()->IsFixedArray() && elements()->length() >= 2));
#endif
return kind;
}
@@ -4393,28 +4303,25 @@ ElementsAccessor* JSObject::GetElementsAccessor() {
}
-bool JSObject::HasFastObjectElements() {
- return IsFastObjectElementsKind(GetElementsKind());
+bool JSObject::HasFastElements() {
+ return GetElementsKind() == FAST_ELEMENTS;
}
-bool JSObject::HasFastSmiElements() {
- return IsFastSmiElementsKind(GetElementsKind());
+bool JSObject::HasFastSmiOnlyElements() {
+ return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
}
-bool JSObject::HasFastSmiOrObjectElements() {
- return IsFastSmiOrObjectElementsKind(GetElementsKind());
+bool JSObject::HasFastTypeElements() {
+ ElementsKind elements_kind = GetElementsKind();
+ return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_ELEMENTS;
}
bool JSObject::HasFastDoubleElements() {
- return IsFastDoubleElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastHoleyElements() {
- return IsFastHoleyElementsKind(GetElementsKind());
+ return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
}
@@ -4471,7 +4378,7 @@ bool JSObject::HasIndexedInterceptor() {
MaybeObject* JSObject::EnsureWritableFastElements() {
- ASSERT(HasFastSmiOrObjectElements());
+ ASSERT(HasFastTypeElements());
FixedArray* elems = FixedArray::cast(elements());
Isolate* isolate = GetIsolate();
if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -4829,7 +4736,7 @@ void Map::ClearCodeCache(Heap* heap) {
void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastSmiOrObjectElements());
+ ASSERT(HasFastTypeElements());
FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
@@ -4861,13 +4768,13 @@ bool JSArray::AllowsSetElementsLength() {
MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
MaybeObject* maybe_result = EnsureCanContainElements(
- storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS);
+ storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
if (maybe_result->IsFailure()) return maybe_result;
ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
- IsFastDoubleElementsKind(GetElementsKind())) ||
+ GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
((storage->map() != GetHeap()->fixed_double_array_map()) &&
- (IsFastObjectElementsKind(GetElementsKind()) ||
- (IsFastSmiElementsKind(GetElementsKind()) &&
+ ((GetElementsKind() == FAST_ELEMENTS) ||
+ (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
set_elements(storage);
set_length(Smi::FromInt(storage->length()));
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 3aea5f0aac..febdaabb12 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -318,9 +318,7 @@ void JSObject::PrintElements(FILE* out) {
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
switch (map()->elements_kind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
// Print in array notation for non-sparse arrays.
FixedArray* p = FixedArray::cast(elements());
@@ -331,7 +329,6 @@ void JSObject::PrintElements(FILE* out) {
}
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// Print in array notation for non-sparse arrays.
if (elements()->length() > 0) {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index ba460cf421..c3663eba3a 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -56,6 +56,11 @@
namespace v8 {
namespace internal {
+void PrintElementsKind(FILE* out, ElementsKind kind) {
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+ PrintF(out, "%s", accessor->name());
+}
+
MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
Object* value) {
@@ -538,7 +543,7 @@ bool JSObject::IsDirty() {
// If the object is fully fast case and has the same map it was
// created with then no changes can have been made to it.
return map() != fun->initial_map()
- || !HasFastObjectElements()
+ || !HasFastElements()
|| !HasFastProperties();
}
@@ -1062,9 +1067,7 @@ void String::StringShortPrint(StringStream* accumulator) {
void JSObject::JSObjectShortPrint(StringStream* accumulator) {
switch (map()->instance_type()) {
case JS_ARRAY_TYPE: {
- double length = JSArray::cast(this)->length()->IsUndefined()
- ? 0
- : JSArray::cast(this)->length()->Number();
+ double length = JSArray::cast(this)->length()->Number();
accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
break;
}
@@ -1601,7 +1604,6 @@ MaybeObject* JSObject::AddFastProperty(String* name,
// We have now allocated all the necessary objects.
// All the changes can be applied at once, so they are atomic.
map()->set_instance_descriptors(old_descriptors);
- new_map->SetBackPointer(map());
new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
set_map(new_map);
return FastPropertyAtPut(index, value);
@@ -1662,7 +1664,6 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
}
}
old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- Map::cast(new_map)->SetBackPointer(old_map);
return function;
}
@@ -1823,7 +1824,6 @@ MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition(
}
}
old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- map()->SetBackPointer(old_map);
return result;
}
@@ -2199,29 +2199,34 @@ static Handle<T> MaybeNull(T* p) {
Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
- ElementsKind kind = elements_kind();
- Handle<Map> transitioned_map = Handle<Map>::null();
- Handle<Map> current_map(this);
- bool packed = IsFastPackedElementsKind(kind);
- if (IsTransitionableFastElementsKind(kind)) {
- while (CanTransitionToMoreGeneralFastElementsKind(kind, false)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, false);
- bool dummy = true;
- Handle<Map> maybe_transitioned_map =
- MaybeNull(current_map->LookupElementsTransitionMap(kind, &dummy));
- if (maybe_transitioned_map.is_null()) break;
- if (ContainsMap(candidates, maybe_transitioned_map) &&
- (packed || !IsFastPackedElementsKind(kind))) {
- transitioned_map = maybe_transitioned_map;
- if (!IsFastPackedElementsKind(kind)) packed = false;
- }
- current_map = maybe_transitioned_map;
- }
- }
- return transitioned_map;
+ ElementsKind elms_kind = elements_kind();
+ if (elms_kind == FAST_DOUBLE_ELEMENTS) {
+ bool dummy = true;
+ Handle<Map> fast_map =
+ MaybeNull(LookupElementsTransitionMap(FAST_ELEMENTS, &dummy));
+ if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
+ return fast_map;
+ }
+ return Handle<Map>::null();
+ }
+ if (elms_kind == FAST_SMI_ONLY_ELEMENTS) {
+ bool dummy = true;
+ Handle<Map> double_map =
+ MaybeNull(LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, &dummy));
+ // In the current implementation, if the DOUBLE map doesn't exist, the
+ // FAST map can't exist either.
+ if (double_map.is_null()) return Handle<Map>::null();
+ Handle<Map> fast_map =
+ MaybeNull(double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
+ &dummy));
+ if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
+ return fast_map;
+ }
+ if (ContainsMap(candidates, double_map)) return double_map;
+ }
+ return Handle<Map>::null();
}
-
static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
ElementsKind elements_kind) {
if (descriptor_contents->IsMap()) {
@@ -2330,36 +2335,24 @@ Object* Map::GetDescriptorContents(String* sentinel_name,
}
-Map* Map::LookupElementsTransitionMap(ElementsKind to_kind,
+Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
bool* safe_to_add_transition) {
- ElementsKind from_kind = elements_kind();
- if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
- if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
- if (safe_to_add_transition) *safe_to_add_transition = false;
- return NULL;
- }
- ElementsKind transitioned_from_kind =
- GetNextMoreGeneralFastElementsKind(from_kind, false);
-
-
- // If the transition is a single step in the transition sequence, fall
- // through to looking it up and returning it. If it requires several steps,
- // divide and conquer.
- if (transitioned_from_kind != to_kind) {
- // If the transition is several steps in the lattice, divide and conquer.
- Map* from_map = LookupElementsTransitionMap(transitioned_from_kind,
- safe_to_add_transition);
- if (from_map == NULL) return NULL;
- return from_map->LookupElementsTransitionMap(to_kind,
+ // Special case: indirect SMI->FAST transition (cf. comment in
+ // AddElementsTransition()).
+ if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ elements_kind == FAST_ELEMENTS) {
+ Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
+ safe_to_add_transition);
+ if (double_map == NULL) return double_map;
+ return double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
safe_to_add_transition);
- }
}
Object* descriptor_contents = GetDescriptorContents(
elements_transition_sentinel_name(), safe_to_add_transition);
if (descriptor_contents != NULL) {
Map* maybe_transition_map =
GetElementsTransitionMapFromDescriptor(descriptor_contents,
- to_kind);
+ elements_kind);
ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
return maybe_transition_map;
}
@@ -2367,35 +2360,29 @@ Map* Map::LookupElementsTransitionMap(ElementsKind to_kind,
}
-MaybeObject* Map::AddElementsTransition(ElementsKind to_kind,
+MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
Map* transitioned_map) {
- ElementsKind from_kind = elements_kind();
- if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
- ASSERT(IsMoreGeneralElementsKindTransition(from_kind, to_kind));
- ElementsKind transitioned_from_kind =
- GetNextMoreGeneralFastElementsKind(from_kind, false);
- // The map transitions graph should be a tree, therefore transitions to
- // ElementsKind that are not adjacent in the ElementsKind sequence are not
- // done directly, but instead by going through intermediate ElementsKinds
- // first.
- if (to_kind != transitioned_from_kind) {
- bool safe_to_add = true;
- Map* intermediate_map = LookupElementsTransitionMap(
- transitioned_from_kind, &safe_to_add);
- // This method is only called when safe_to_add has been found to be true
- // earlier.
- ASSERT(safe_to_add);
-
- if (intermediate_map == NULL) {
- MaybeObject* maybe_map = CopyDropTransitions();
- if (!maybe_map->To(&intermediate_map)) return maybe_map;
- intermediate_map->set_elements_kind(transitioned_from_kind);
- MaybeObject* maybe_transition = AddElementsTransition(
- transitioned_from_kind, intermediate_map);
- if (maybe_transition->IsFailure()) return maybe_transition;
- }
- return intermediate_map->AddElementsTransition(to_kind, transitioned_map);
- }
+ // The map transition graph should be a tree, therefore the transition
+ // from SMI to FAST elements is not done directly, but by going through
+ // DOUBLE elements first.
+ if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ elements_kind == FAST_ELEMENTS) {
+ bool safe_to_add = true;
+ Map* double_map = this->LookupElementsTransitionMap(
+ FAST_DOUBLE_ELEMENTS, &safe_to_add);
+ // This method is only called when safe_to_add_transition has been found
+ // to be true earlier.
+ ASSERT(safe_to_add);
+
+ if (double_map == NULL) {
+ MaybeObject* maybe_map = this->CopyDropTransitions();
+ if (!maybe_map->To(&double_map)) return maybe_map;
+ double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+ MaybeObject* maybe_double_transition = this->AddElementsTransition(
+ FAST_DOUBLE_ELEMENTS, double_map);
+ if (maybe_double_transition->IsFailure()) return maybe_double_transition;
+ }
+ return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map);
}
bool safe_to_add_transition = true;
@@ -2421,7 +2408,6 @@ MaybeObject* Map::AddElementsTransition(ElementsKind to_kind,
return maybe_new_descriptors;
}
set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- transitioned_map->SetBackPointer(this);
return this;
}
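
The comment above states the key invariant: the transition graph must remain a tree, so a SMI_ONLY-to-FAST edge is materialized as SMI_ONLY to DOUBLE to FAST rather than directly. A toy version of that two-step insertion; std::map stands in for the descriptor-based storage, and the intermediate node is deliberately leaked to keep the sketch short:

    #include <cassert>
    #include <map>

    enum ElementsKind { SMI_ONLY, DOUBLE, OBJECT };

    struct TransitionMap {
      ElementsKind kind;
      std::map<ElementsKind, TransitionMap*> transitions;
    };

    // A SMI_ONLY map never points straight at an OBJECT map: the edge is
    // routed through a (possibly freshly created) DOUBLE map.
    static TransitionMap* AddElementsTransition(TransitionMap* from,
                                                ElementsKind to,
                                                TransitionMap* target) {
      if (from->kind == SMI_ONLY && to == OBJECT) {
        TransitionMap*& dbl = from->transitions[DOUBLE];
        if (dbl == nullptr) dbl = new TransitionMap{DOUBLE, {}};
        return AddElementsTransition(dbl, OBJECT, target);
      }
      from->transitions[to] = target;
      return from;
    }

    int main() {
      TransitionMap smi{SMI_ONLY, {}};
      TransitionMap obj{OBJECT, {}};
      AddElementsTransition(&smi, OBJECT, &obj);
      assert(smi.transitions.count(OBJECT) == 0);  // no direct edge
      assert(smi.transitions[DOUBLE]->transitions[OBJECT] == &obj);
      return 0;
    }
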
@@ -2447,11 +2433,10 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
!current_map->IsUndefined() &&
!current_map->is_shared();
- // Prevent long chains of DICTIONARY -> FAST_*_ELEMENTS maps caused by objects
+ // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
// with elements that switch back and forth between dictionary and fast
- // element modes.
- if (from_kind == DICTIONARY_ELEMENTS &&
- IsFastElementsKind(to_kind)) {
+ // element mode.
+ if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) {
safe_to_add_transition = false;
}
@@ -2978,18 +2963,12 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
// Preserve the attributes of this existing property.
attributes = result->GetAttributes();
return ConvertDescriptorToField(name, value, attributes);
- case CALLBACKS: {
- Object* callback_object = result->GetCallbackObject();
- if (callback_object->IsAccessorPair() &&
- !AccessorPair::cast(callback_object)->ContainsAccessor()) {
- return ConvertDescriptorToField(name, value, attributes);
- }
- return SetPropertyWithCallback(callback_object,
+ case CALLBACKS:
+ return SetPropertyWithCallback(result->GetCallbackObject(),
name,
value,
result->holder(),
strict_mode);
- }
case INTERCEPTOR:
return SetPropertyWithInterceptor(name, value, attributes, strict_mode);
case CONSTANT_TRANSITION: {
@@ -3493,7 +3472,8 @@ MaybeObject* JSObject::NormalizeElements() {
}
if (array->IsDictionary()) return array;
- ASSERT(HasFastSmiOrObjectElements() ||
+ ASSERT(HasFastElements() ||
+ HasFastSmiOnlyElements() ||
HasFastDoubleElements() ||
HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
@@ -3528,7 +3508,8 @@ MaybeObject* JSObject::NormalizeElements() {
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
} else {
- ASSERT(old_map->has_fast_smi_or_object_elements());
+ ASSERT(old_map->has_fast_elements() ||
+ old_map->has_fast_smi_only_elements());
value = FixedArray::cast(array)->get(i);
}
PropertyDetails details = PropertyDetails(NONE, NORMAL);
@@ -4015,9 +3996,9 @@ MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
- ASSERT(IsFastObjectElementsKind(kind) ||
+ ASSERT(kind == FAST_ELEMENTS ||
kind == DICTIONARY_ELEMENTS);
- if (IsFastObjectElementsKind(kind)) {
+ if (kind == FAST_ELEMENTS) {
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
: elements->length();
@@ -4069,15 +4050,12 @@ bool JSObject::ReferencesObject(Object* obj) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
// Raw pixels and external arrays do not reference other
// objects.
break;
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
break;
case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS: {
FixedArray* elements = FixedArray::cast(this->elements());
if (ReferencesObjectFromElements(elements, kind, obj)) return true;
@@ -4093,8 +4071,7 @@ bool JSObject::ReferencesObject(Object* obj) {
}
// Check the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS :
- FAST_HOLEY_ELEMENTS;
+ kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
break;
}
@@ -4328,7 +4305,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
}
-// Search object and its prototype chain for callback properties.
+// Search object and its prototype chain for callback properties.
void JSObject::LookupCallback(String* name, LookupResult* result) {
Heap* heap = GetHeap();
for (Object* current = this;
@@ -4372,12 +4349,9 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
Object* setter,
PropertyAttributes attributes) {
switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -4439,12 +4413,7 @@ MaybeObject* JSObject::CreateAccessorPairFor(String* name) {
LookupResult result(GetHeap()->isolate());
LocalLookupRealNamedProperty(name, &result);
if (result.IsProperty() && result.type() == CALLBACKS) {
- // Note that the result can actually have IsDontDelete() == true when we
- // e.g. have to fall back to the slow case while adding a setter after
- // successfully reusing a map transition for a getter. Nevertheless, this is
- // OK, because the assertion only holds for the whole addition of both
- // accessors, not for the addition of each part. See first comment in
- // DefinePropertyAccessor below.
+ ASSERT(!result.IsDontDelete());
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
return AccessorPair::cast(obj)->CopyWithoutTransitions();
@@ -4458,28 +4427,6 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name,
Object* getter,
Object* setter,
PropertyAttributes attributes) {
- // We could assert that the property is configurable here, but we would need
- // to do a lookup, which seems to be a bit of overkill.
- Heap* heap = GetHeap();
- bool only_attribute_changes = getter->IsNull() && setter->IsNull();
- if (HasFastProperties() && !only_attribute_changes) {
- MaybeObject* getterOk = heap->undefined_value();
- if (!getter->IsNull()) {
- getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes);
- if (getterOk->IsFailure()) return getterOk;
- }
-
- MaybeObject* setterOk = heap->undefined_value();
- if (getterOk != heap->null_value() && !setter->IsNull()) {
- setterOk = DefineFastAccessor(name, ACCESSOR_SETTER, setter, attributes);
- if (setterOk->IsFailure()) return setterOk;
- }
-
- if (getterOk != heap->null_value() && setterOk != heap->null_value()) {
- return heap->undefined_value();
- }
- }
-
AccessorPair* accessors;
{ MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
if (!maybe_accessors->To(&accessors)) return maybe_accessors;
@@ -4494,7 +4441,7 @@ bool JSObject::CanSetCallback(String* name) {
GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
// Check if there is an API defined callback object which prohibits
- // callback overwriting in this object or its prototype chain.
+ // callback overwriting in this object or its prototype chain.
// This mechanism is needed for instance in a browser setting, where
// certain accessors such as window.location should not be allowed
// to be overwritten because allowing overwriting could potentially
@@ -4629,159 +4576,6 @@ MaybeObject* JSObject::DefineAccessor(String* name,
}
-static MaybeObject* CreateFreshAccessor(JSObject* obj,
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
- // step 1: create a new getter/setter pair with only the accessor in it
- Heap* heap = obj->GetHeap();
- AccessorPair* accessors2;
- { MaybeObject* maybe_accessors2 = heap->AllocateAccessorPair();
- if (!maybe_accessors2->To(&accessors2)) return maybe_accessors2;
- }
- accessors2->set(component, accessor);
-
- // step 2: create a copy of the descriptors, incl. the new getter/setter pair
- Map* map1 = obj->map();
- CallbacksDescriptor callbacks_descr2(name, accessors2, attributes);
- DescriptorArray* descriptors2;
- { MaybeObject* maybe_descriptors2 =
- map1->instance_descriptors()->CopyInsert(&callbacks_descr2,
- REMOVE_TRANSITIONS);
- if (!maybe_descriptors2->To(&descriptors2)) return maybe_descriptors2;
- }
-
- // step 3: create a new map with the new descriptors
- Map* map2;
- { MaybeObject* maybe_map2 = map1->CopyDropDescriptors();
- if (!maybe_map2->To(&map2)) return maybe_map2;
- }
- map2->set_instance_descriptors(descriptors2);
-
- // step 4: create a new getter/setter pair with a transition to the new map
- AccessorPair* accessors1;
- { MaybeObject* maybe_accessors1 = heap->AllocateAccessorPair();
- if (!maybe_accessors1->To(&accessors1)) return maybe_accessors1;
- }
- accessors1->set(component, map2);
-
- // step 5: create a copy of the descriptors, incl. the new getter/setter pair
- // with the transition
- CallbacksDescriptor callbacks_descr1(name, accessors1, attributes);
- DescriptorArray* descriptors1;
- { MaybeObject* maybe_descriptors1 =
- map1->instance_descriptors()->CopyInsert(&callbacks_descr1,
- KEEP_TRANSITIONS);
- if (!maybe_descriptors1->To(&descriptors1)) return maybe_descriptors1;
- }
-
- // step 6: everything went well so far, so we make our changes visible
- obj->set_map(map2);
- map1->set_instance_descriptors(descriptors1);
- map2->SetBackPointer(map1);
- return obj;
-}
-
-
-static bool TransitionToSameAccessor(Object* map,
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes ) {
- DescriptorArray* descs = Map::cast(map)->instance_descriptors();
- int number = descs->SearchWithCache(name);
- ASSERT(number != DescriptorArray::kNotFound);
- Object* target_accessor =
- AccessorPair::cast(descs->GetCallbacksObject(number))->get(component);
- PropertyAttributes target_attributes = descs->GetDetails(number).attributes();
- return target_accessor == accessor && target_attributes == attributes;
-}
-
-
-static MaybeObject* NewCallbackTransition(JSObject* obj,
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes,
- AccessorPair* accessors2) {
- // step 1: copy the old getter/setter pair and set the new accessor
- AccessorPair* accessors3;
- { MaybeObject* maybe_accessors3 = accessors2->CopyWithoutTransitions();
- if (!maybe_accessors3->To(&accessors3)) return maybe_accessors3;
- }
- accessors3->set(component, accessor);
-
- // step 2: create a copy of the descriptors, incl. the new getter/setter pair
- Map* map2 = obj->map();
- CallbacksDescriptor callbacks_descr3(name, accessors3, attributes);
- DescriptorArray* descriptors3;
- { MaybeObject* maybe_descriptors3 =
- map2->instance_descriptors()->CopyInsert(&callbacks_descr3,
- REMOVE_TRANSITIONS);
- if (!maybe_descriptors3->To(&descriptors3)) return maybe_descriptors3;
- }
-
- // step 3: create a new map with the new descriptors
- Map* map3;
- { MaybeObject* maybe_map3 = map2->CopyDropDescriptors();
- if (!maybe_map3->To(&map3)) return maybe_map3;
- }
- map3->set_instance_descriptors(descriptors3);
-
- // step 4: everything went well so far, so we make our changes visible
- obj->set_map(map3);
- accessors2->set(component, map3);
- map3->SetBackPointer(map2);
- return obj;
-}
-
-
-MaybeObject* JSObject::DefineFastAccessor(String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes) {
- ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined());
- LookupResult result(GetIsolate());
- LocalLookup(name, &result);
-
- // If we have a new property, create a fresh accessor plus a transition to it.
- if (!result.IsFound()) {
- return CreateFreshAccessor(this, name, component, accessor, attributes);
- }
-
- // If the property is not a JavaScript accessor, fall back to the slow case.
- if (result.type() != CALLBACKS) return GetHeap()->null_value();
- Object* callback_value = result.GetCallbackObject();
- if (!callback_value->IsAccessorPair()) return GetHeap()->null_value();
- AccessorPair* accessors = AccessorPair::cast(callback_value);
-
- // Follow a callback transition, if there is a fitting one.
- Object* entry = accessors->get(component);
- if (entry->IsMap() &&
- TransitionToSameAccessor(entry, name, component, accessor, attributes)) {
- set_map(Map::cast(entry));
- return this;
- }
-
- // When we re-add the same accessor again, there is nothing to do.
- if (entry == accessor && result.GetAttributes() == attributes) return this;
-
- // Only the other accessor has been set so far, create a new transition.
- if (entry->IsTheHole()) {
- return NewCallbackTransition(this,
- name,
- component,
- accessor,
- attributes,
- accessors);
- }
-
- // Nothing from the above worked, so we have to fall back to the slow case.
- return GetHeap()->null_value();
-}
-
-
MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
Isolate* isolate = GetIsolate();
String* name = String::cast(info->name());
@@ -4818,12 +4612,9 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// Accessors overwrite previous callbacks (cf. with getters/setters).
switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -5168,7 +4959,7 @@ class IntrusiveMapTransitionIterator {
// underlying array while it is running.
class IntrusivePrototypeTransitionIterator {
public:
- explicit IntrusivePrototypeTransitionIterator(HeapObject* proto_trans)
+ explicit IntrusivePrototypeTransitionIterator(FixedArray* proto_trans)
: proto_trans_(proto_trans) { }
void Start() {
@@ -5193,7 +4984,7 @@ class IntrusivePrototypeTransitionIterator {
private:
bool HasTransitions() {
- return proto_trans_->map()->IsSmi() || proto_trans_->IsFixedArray();
+ return proto_trans_->length() >= Map::kProtoTransitionHeaderSize;
}
Object** Header() {
@@ -5201,16 +4992,12 @@ class IntrusivePrototypeTransitionIterator {
}
int NumberOfTransitions() {
- ASSERT(HasTransitions());
- FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
- Object* num = proto_trans->get(Map::kProtoTransitionNumberOfEntriesOffset);
+ Object* num = proto_trans_->get(Map::kProtoTransitionNumberOfEntriesOffset);
return Smi::cast(num)->value();
}
Map* GetTransition(int transitionNumber) {
- ASSERT(HasTransitions());
- FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
- return Map::cast(proto_trans->get(IndexFor(transitionNumber)));
+ return Map::cast(proto_trans_->get(IndexFor(transitionNumber)));
}
int IndexFor(int transitionNumber) {
@@ -5219,7 +5006,7 @@ class IntrusivePrototypeTransitionIterator {
transitionNumber * Map::kProtoTransitionElementsPerEntry;
}
- HeapObject* proto_trans_;
+ FixedArray* proto_trans_;
};
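
With prototype transitions back in a plain FixedArray, entries sit behind a small header and are addressed with fixed-stride arithmetic, as in IndexFor above. A sketch with illustrative constants (the real header and entry sizes live on Map):

    #include <cstdio>

    // Illustrative layout constants in the spirit of Map::kProtoTransition*.
    static const int kHeaderSize      = 1;  // slot 0: number of entries
    static const int kEntriesPerEntry = 2;  // {prototype, map} per entry
    static const int kProtoOffset     = 0;
    static const int kMapOffset       = 1;

    static int IndexFor(int transition_number) {
      return kHeaderSize + transition_number * kEntriesPerEntry;
    }

    int main() {
      // Entry 2 starts at slot 1 + 2*2 = 5; its map lives in slot 6.
      printf("entry 2: proto@%d map@%d\n",
             IndexFor(2) + kProtoOffset, IndexFor(2) + kMapOffset);
      return 0;
    }
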
@@ -5940,15 +5727,21 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
int index = Search(descriptor->GetKey());
const bool replacing = (index != kNotFound);
bool keep_enumeration_index = false;
- if (!replacing) {
- ++new_size;
- } else if (!IsTransitionOnly(index)) {
- // We are replacing an existing descriptor. We keep the enumeration index
- // of a visible property.
- keep_enumeration_index = true;
- } else if (remove_transitions) {
- // Replaced descriptor has been counted as removed if it is a transition
- // that will be replaced. Adjust count in this case.
+ if (replacing) {
+ // We are replacing an existing descriptor. We keep the enumeration
+ // index of a visible property.
+ PropertyType t = GetDetails(index).type();
+ if (t == CONSTANT_FUNCTION ||
+ t == FIELD ||
+ t == CALLBACKS ||
+ t == INTERCEPTOR) {
+ keep_enumeration_index = true;
+ } else if (remove_transitions) {
+ // Replaced descriptor has been counted as removed if it is
+ // a transition that will be replaced. Adjust count in this case.
+ ++new_size;
+ }
+ } else {
++new_size;
}
@@ -6146,8 +5939,8 @@ MaybeObject* AccessorPair::CopyWithoutTransitions() {
Object* AccessorPair::GetComponent(AccessorComponent component) {
- Object* accessor = get(component);
- return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
+ Object* accessor = (component == ACCESSOR_GETTER) ? getter() : setter();
+ return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
}
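
GetComponent normalizes the hole, which marks an unset accessor slot, to undefined before the value can escape to script. A toy model of that lookup (Value and the sentinels are stand-ins for V8's tagged objects):

    #include <cassert>

    enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };

    struct Value { bool is_hole; };
    static Value kTheHole{true};
    static Value kUndefined{false};
    static Value kGetterFn{false};

    struct AccessorPair {
      Value* getter = &kTheHole;
      Value* setter = &kTheHole;

      // An unset component is stored as the hole internally but must never
      // leak to script, so it is reported as undefined instead.
      Value* GetComponent(AccessorComponent component) {
        Value* accessor = (component == ACCESSOR_GETTER) ? getter : setter;
        return accessor->is_hole ? &kUndefined : accessor;
      }
    };

    int main() {
      AccessorPair pair;
      pair.getter = &kGetterFn;
      assert(pair.GetComponent(ACCESSOR_GETTER) == &kGetterFn);
      assert(pair.GetComponent(ACCESSOR_SETTER) == &kUndefined);
      return 0;
    }
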
@@ -7375,23 +7168,85 @@ void String::PrintOn(FILE* file) {
}
-// Clear a possible back pointer in case the transition leads to a dead map.
-// Return true in case a back pointer has been cleared and false otherwise.
-// Set *keep_entry to true when a live map transition has been found.
-static bool ClearBackPointer(Heap* heap, Object* target, bool* keep_entry) {
- if (!target->IsMap()) return false;
- Map* map = Map::cast(target);
+void Map::CreateOneBackPointer(Object* transition_target) {
+ if (!transition_target->IsMap()) return;
+ Map* target = Map::cast(transition_target);
+#ifdef DEBUG
+ // Verify target.
+ Object* source_prototype = prototype();
+ Object* target_prototype = target->prototype();
+ ASSERT(source_prototype->IsJSReceiver() ||
+ source_prototype->IsMap() ||
+ source_prototype->IsNull());
+ ASSERT(target_prototype->IsJSReceiver() ||
+ target_prototype->IsNull());
+ ASSERT(source_prototype->IsMap() ||
+ source_prototype == target_prototype);
+#endif
+ // Point target back to source. set_prototype() will not let us set
+ // the prototype to a map, as we do here.
+ *RawField(target, kPrototypeOffset) = this;
+}
+
+
+void Map::CreateBackPointers() {
+ DescriptorArray* descriptors = instance_descriptors();
+ for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
+ switch (descriptors->GetType(i)) {
+ case MAP_TRANSITION:
+ case CONSTANT_TRANSITION:
+ CreateOneBackPointer(descriptors->GetValue(i));
+ break;
+ case ELEMENTS_TRANSITION: {
+ Object* object = descriptors->GetValue(i);
+ if (object->IsMap()) {
+ CreateOneBackPointer(object);
+ } else {
+ FixedArray* array = FixedArray::cast(object);
+ for (int i = 0; i < array->length(); ++i) {
+ CreateOneBackPointer(array->get(i));
+ }
+ }
+ break;
+ }
+ case CALLBACKS: {
+ Object* object = descriptors->GetValue(i);
+ if (object->IsAccessorPair()) {
+ AccessorPair* accessors = AccessorPair::cast(object);
+ CreateOneBackPointer(accessors->getter());
+ CreateOneBackPointer(accessors->setter());
+ }
+ break;
+ }
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ case HANDLER:
+ case INTERCEPTOR:
+ case NULL_DESCRIPTOR:
+ break;
+ }
+ }
+}
+
+
+bool Map::RestoreOneBackPointer(Object* object,
+ Object* real_prototype,
+ bool* keep_entry) {
+ if (!object->IsMap()) return false;
+ Map* map = Map::cast(object);
if (Marking::MarkBitFrom(map).Get()) {
*keep_entry = true;
return false;
- } else {
- map->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER);
- return true;
}
+ ASSERT(map->prototype() == this || map->prototype() == real_prototype);
+  // The prototype() getter is read-only; set_prototype() has side effects.
+ *RawField(map, Map::kPrototypeOffset) = real_prototype;
+ return true;
}
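
Note: CreateOneBackPointer and RestoreOneBackPointer form a pair used around
mark-compact. Before collection, each transition target's prototype slot is
pointed back at the parent map (through RawField, since set_prototype() would
reject a map); after marking, the real prototype is written back for dead
targets, while live targets merely set *keep_entry. A condensed sketch, with
parent and target standing in for real maps:

    Object* real_prototype = parent->prototype();

    // Before GC: point the transition target back at its parent.
    parent->CreateOneBackPointer(target);  // target->prototype() == parent

    // After marking:
    bool keep_entry = false;
    if (parent->RestoreOneBackPointer(target, real_prototype, &keep_entry)) {
      // Target was dead: its prototype is real_prototype again and the
      // caller nulls the transition entry.
    } else if (keep_entry) {
      // Target is live: keep the entry; live maps have their prototypes
      // restored separately by the collector.
    }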
-void Map::ClearNonLiveTransitions(Heap* heap) {
+void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
DescriptorArray* d = DescriptorArray::cast(
*RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
if (d->IsEmpty()) return;
@@ -7404,22 +7259,24 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
// If the pair (value, details) is a map transition, check if the target is
// live. If not, null the descriptor. Also drop the back pointer for that
// map transition, so that this map is not reached again by following a back
- // pointer from that non-live map.
+ // pointer from a non-live object.
bool keep_entry = false;
PropertyDetails details(Smi::cast(contents->get(i + 1)));
switch (details.type()) {
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
- ClearBackPointer(heap, contents->get(i), &keep_entry);
+ RestoreOneBackPointer(contents->get(i), real_prototype, &keep_entry);
break;
case ELEMENTS_TRANSITION: {
Object* object = contents->get(i);
if (object->IsMap()) {
- ClearBackPointer(heap, object, &keep_entry);
+ RestoreOneBackPointer(object, real_prototype, &keep_entry);
} else {
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); ++j) {
- if (ClearBackPointer(heap, array->get(j), &keep_entry)) {
+ if (RestoreOneBackPointer(array->get(j),
+ real_prototype,
+ &keep_entry)) {
array->set_undefined(j);
}
}
@@ -7430,10 +7287,14 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
Object* object = contents->get(i);
if (object->IsAccessorPair()) {
AccessorPair* accessors = AccessorPair::cast(object);
- if (ClearBackPointer(heap, accessors->getter(), &keep_entry)) {
+ if (RestoreOneBackPointer(accessors->getter(),
+ real_prototype,
+ &keep_entry)) {
accessors->set_getter(heap->the_hole_value());
}
- if (ClearBackPointer(heap, accessors->setter(), &keep_entry)) {
+ if (RestoreOneBackPointer(accessors->setter(),
+ real_prototype,
+ &keep_entry)) {
accessors->set_setter(heap->the_hole_value());
}
} else {
@@ -8281,20 +8142,6 @@ void Code::ClearInlineCaches() {
}
-void Code::ClearTypeFeedbackCells(Heap* heap) {
- Object* raw_info = type_feedback_info();
- if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackCells* type_feedback_cells =
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
- for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- ASSERT(type_feedback_cells->AstId(i)->IsSmi());
- JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
- }
- }
-}
-
-
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
@@ -8531,10 +8378,6 @@ void Code::Disassemble(const char* name, FILE* out) {
CompareIC::State state = CompareIC::ComputeState(this);
PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
}
- if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
- Token::Value op = CompareIC::ComputeOperation(this);
- PrintF(out, "compare_operation = %s\n", Token::Name(op));
- }
}
if ((name != NULL) && (name[0] != '\0')) {
PrintF(out, "name = %s\n", name);
@@ -8606,7 +8449,7 @@ void Code::Disassemble(const char* name, FILE* out) {
MaybeObject* JSObject::SetFastElementsCapacityAndLength(
int capacity,
int length,
- SetFastElementsCapacitySmiMode smi_mode) {
+ SetFastElementsCapacityMode set_capacity_mode) {
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
@@ -8617,40 +8460,32 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
if (!maybe->To(&new_elements)) return maybe;
}
- ElementsKind elements_kind = GetElementsKind();
- ElementsKind new_elements_kind;
- // The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it,
- // or if it's allowed and the old elements array contained only SMIs.
- bool has_fast_smi_elements =
- (smi_mode == kForceSmiElements) ||
- ((smi_mode == kAllowSmiElements) && HasFastSmiElements());
- if (has_fast_smi_elements) {
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_SMI_ELEMENTS;
- } else {
- new_elements_kind = FAST_SMI_ELEMENTS;
- }
- } else {
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- new_elements_kind = FAST_ELEMENTS;
- }
+ // Find the new map to use for this object if there is a map change.
+ Map* new_map = NULL;
+ if (elements()->map() != heap->non_strict_arguments_elements_map()) {
+ // The resized array has FAST_SMI_ONLY_ELEMENTS if the capacity mode forces
+ // it, or if it's allowed and the old elements array contained only SMIs.
+ bool has_fast_smi_only_elements =
+ (set_capacity_mode == kForceSmiOnlyElements) ||
+ ((set_capacity_mode == kAllowSmiOnlyElements) &&
+ (elements()->map()->has_fast_smi_only_elements() ||
+ elements() == heap->empty_fixed_array()));
+ ElementsKind elements_kind = has_fast_smi_only_elements
+ ? FAST_SMI_ONLY_ELEMENTS
+ : FAST_ELEMENTS;
+ MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
+ if (!maybe->To(&new_map)) return maybe;
}
+
FixedArrayBase* old_elements = elements();
+ ElementsKind elements_kind = GetElementsKind();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
- { MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, new_elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
+ ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
+ ? FAST_SMI_ONLY_ELEMENTS
+ : FAST_ELEMENTS;
+ accessor->CopyElements(this, new_elements, to_kind);
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- Map* new_map = map();
- if (new_elements_kind != elements_kind) {
- MaybeObject* maybe =
- GetElementsTransitionMap(GetIsolate(), new_elements_kind);
- if (!maybe->To(&new_map)) return maybe;
- }
- ValidateElements();
set_map_and_elements(new_map, new_elements);
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
@@ -8662,9 +8497,11 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
GetElementsKind(), new_elements);
}
+ // Update the length if necessary.
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::FromInt(length));
}
+
return new_elements;
}
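
Note: the capacity mode decides whether a resized backing store may stay
smi-only: kForceSmiOnlyElements pins the kind, kAllowSmiOnlyElements keeps it
only if the old store held nothing but smis (or was empty), and
kDontAllowSmiOnlyElements widens to FAST_ELEMENTS. A sketch restating the
decision inside the method above:

    // Choosing the post-resize elements kind (same logic as above).
    bool smi_only =
        (set_capacity_mode == kForceSmiOnlyElements) ||
        ((set_capacity_mode == kAllowSmiOnlyElements) &&
         (elements()->map()->has_fast_smi_only_elements() ||
          elements() == heap->empty_fixed_array()));
    ElementsKind kind = smi_only ? FAST_SMI_ONLY_ELEMENTS : FAST_ELEMENTS;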
@@ -8682,28 +8519,17 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
if (!maybe_obj->To(&elems)) return maybe_obj;
}
- ElementsKind elements_kind = GetElementsKind();
- ElementsKind new_elements_kind = elements_kind;
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- new_elements_kind = FAST_DOUBLE_ELEMENTS;
- }
-
Map* new_map;
{ MaybeObject* maybe_obj =
- GetElementsTransitionMap(heap->isolate(), new_elements_kind);
+ GetElementsTransitionMap(heap->isolate(), FAST_DOUBLE_ELEMENTS);
if (!maybe_obj->To(&new_map)) return maybe_obj;
}
FixedArrayBase* old_elements = elements();
+ ElementsKind elements_kind = GetElementsKind();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
- { MaybeObject* maybe_obj =
- accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
+ accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- ValidateElements();
set_map_and_elements(new_map, elems);
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
@@ -8712,7 +8538,7 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
if (FLAG_trace_elements_transitions) {
PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), elems);
+ FAST_DOUBLE_ELEMENTS, elems);
}
if (IsJSArray()) {
@@ -8992,10 +8818,8 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
(Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -9006,8 +8830,7 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
break;
}
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
(Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -9291,7 +9114,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
- ASSERT(HasFastSmiOrObjectElements() ||
+ ASSERT(HasFastTypeElements() ||
HasFastArgumentsElements());
FixedArray* backing_store = FixedArray::cast(elements());
@@ -9317,29 +9140,13 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
// Check if the length property of this object needs to be updated.
uint32_t array_length = 0;
bool must_update_array_length = false;
- bool introduces_holes = true;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
- introduces_holes = index > array_length;
if (index >= array_length) {
must_update_array_length = true;
array_length = index + 1;
}
- } else {
- introduces_holes = index >= capacity;
- }
-
- // If the array is growing, and it's not growth by a single element at the
- // end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = GetElementsKind();
- if (introduces_holes &&
- IsFastElementsKind(elements_kind) &&
- !IsFastHoleyElementsKind(elements_kind)) {
- ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
- if (maybe->IsFailure()) return maybe;
}
-
// Check if the capacity of the backing store needs to be increased, or if
// a transition to slow elements is necessary.
if (index >= capacity) {
@@ -9359,44 +9166,42 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
}
}
// Convert to fast double elements if appropriate.
- if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
+ if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) {
MaybeObject* maybe =
SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
if (maybe->IsFailure()) return maybe;
FixedDoubleArray::cast(elements())->set(index, value->Number());
- ValidateElements();
return value;
}
- // Change elements kind from Smi-only to generic FAST if necessary.
- if (HasFastSmiElements() && !value->IsSmi()) {
+ // Change elements kind from SMI_ONLY to generic FAST if necessary.
+ if (HasFastSmiOnlyElements() && !value->IsSmi()) {
Map* new_map;
- ElementsKind kind = HasFastHoleyElements()
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
- kind);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
+ { MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
+ FAST_ELEMENTS);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ }
set_map(new_map);
+ if (FLAG_trace_elements_transitions) {
+ PrintElementsTransition(stdout, FAST_SMI_ONLY_ELEMENTS, elements(),
+ FAST_ELEMENTS, elements());
+ }
}
// Increase backing store capacity if that's been decided previously.
if (new_capacity != capacity) {
FixedArray* new_elements;
- SetFastElementsCapacitySmiMode smi_mode =
- value->IsSmi() && HasFastSmiElements()
- ? kAllowSmiElements
- : kDontAllowSmiElements;
+ SetFastElementsCapacityMode set_capacity_mode =
+ value->IsSmi() && HasFastSmiOnlyElements()
+ ? kAllowSmiOnlyElements
+ : kDontAllowSmiOnlyElements;
{ MaybeObject* maybe =
SetFastElementsCapacityAndLength(new_capacity,
array_length,
- smi_mode);
+ set_capacity_mode);
if (!maybe->To(&new_elements)) return maybe;
}
new_elements->set(index, value);
- ValidateElements();
return value;
}
-
// Finally, set the new element and length.
ASSERT(elements()->IsFixedArray());
backing_store->set(index, value);
@@ -9520,21 +9325,20 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
} else {
new_length = dictionary->max_number_key() + 1;
}
- SetFastElementsCapacitySmiMode smi_mode = FLAG_smi_only_arrays
- ? kAllowSmiElements
- : kDontAllowSmiElements;
+ SetFastElementsCapacityMode set_capacity_mode = FLAG_smi_only_arrays
+ ? kAllowSmiOnlyElements
+ : kDontAllowSmiOnlyElements;
bool has_smi_only_elements = false;
bool should_convert_to_fast_double_elements =
ShouldConvertToFastDoubleElements(&has_smi_only_elements);
if (has_smi_only_elements) {
- smi_mode = kForceSmiElements;
+ set_capacity_mode = kForceSmiOnlyElements;
}
MaybeObject* result = should_convert_to_fast_double_elements
? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
: SetFastElementsCapacityAndLength(new_length,
new_length,
- smi_mode);
- ValidateElements();
+ set_capacity_mode);
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -9573,40 +9377,27 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
// If the value object is not a heap number, switch to fast elements and try
// again.
bool value_is_smi = value->IsSmi();
- bool introduces_holes = true;
- uint32_t length = elms_length;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
- introduces_holes = index > length;
- } else {
- introduces_holes = index >= elms_length;
- }
-
if (!value->IsNumber()) {
+ Object* obj;
+ uint32_t length = elms_length;
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ }
MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
elms_length,
length,
- kDontAllowSmiElements);
- if (maybe_obj->IsFailure()) return maybe_obj;
- maybe_obj = SetFastElement(index, value, strict_mode, check_prototype);
- if (maybe_obj->IsFailure()) return maybe_obj;
- ValidateElements();
- return maybe_obj;
+ kDontAllowSmiOnlyElements);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ return SetFastElement(index,
+ value,
+ strict_mode,
+ check_prototype);
}
double double_value = value_is_smi
? static_cast<double>(Smi::cast(value)->value())
: HeapNumber::cast(value)->value();
- // If the array is growing, and it's not growth by a single element at the
- // end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = GetElementsKind();
- if (introduces_holes && !IsFastHoleyElementsKind(elements_kind)) {
- ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
- if (maybe->IsFailure()) return maybe;
- }
-
// Check whether there is extra space in the fixed array.
if (index < elms_length) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
@@ -9628,11 +9419,13 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
int new_capacity = NewElementsCapacity(index+1);
if (!ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index);
- MaybeObject* maybe_obj =
- SetFastDoubleElementsCapacityAndLength(new_capacity, index + 1);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ Object* obj;
+ { MaybeObject* maybe_obj =
+ SetFastDoubleElementsCapacityAndLength(new_capacity,
+ index + 1);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
FixedDoubleArray::cast(elements())->set(index, double_value);
- ValidateElements();
return value;
}
}
@@ -9776,13 +9569,10 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
(attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
return SetFastElement(index, value, strict_mode, check_prototype);
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
return SetFastDoubleElement(index, value, strict_mode, check_prototype);
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
@@ -9873,19 +9663,11 @@ Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
ElementsKind from_kind = map()->elements_kind();
- if (IsFastHoleyElementsKind(from_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- }
-
Isolate* isolate = GetIsolate();
- if (elements() == isolate->heap()->empty_fixed_array() ||
- (IsFastSmiOrObjectElementsKind(from_kind) &&
- IsFastSmiOrObjectElementsKind(to_kind)) ||
- (from_kind == FAST_DOUBLE_ELEMENTS &&
- to_kind == FAST_HOLEY_DOUBLE_ELEMENTS)) {
- ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
- // No change is needed to the elements() buffer, the transition
- // only requires a map change.
+ if ((from_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements() == isolate->heap()->empty_fixed_array()) &&
+ to_kind == FAST_ELEMENTS) {
+ ASSERT(from_kind != FAST_ELEMENTS);
MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
Map* new_map;
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
@@ -9912,21 +9694,18 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
}
}
- if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
MaybeObject* maybe_result =
SetFastDoubleElementsCapacityAndLength(capacity, length);
if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
return this;
}
- if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
+ if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
- capacity, length, kDontAllowSmiElements);
+ capacity, length, kDontAllowSmiOnlyElements);
if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
return this;
}
@@ -9940,14 +9719,10 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
// static
bool Map::IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind) {
- // Transitions can't go backwards.
- if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
- return false;
- }
-
- // Transitions from HOLEY -> PACKED are not allowed.
- return !IsFastHoleyElementsKind(from_kind) ||
- IsFastHoleyElementsKind(to_kind);
+ return
+ (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ (to_kind == FAST_DOUBLE_ELEMENTS || to_kind == FAST_ELEMENTS)) ||
+ (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS);
}
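
Note: the rewritten predicate encodes a one-way lattice over the fast kinds:
FAST_SMI_ONLY_ELEMENTS may widen to FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS,
and FAST_DOUBLE_ELEMENTS may widen to FAST_ELEMENTS; nothing narrows back.
A hedged sketch of a caller that relies on this (StoreNumber is a
hypothetical helper, and index is assumed to be within bounds):

    // Widen a smi-only array before storing an unboxed double.
    MaybeObject* StoreNumber(JSObject* obj, uint32_t index, double v) {
      // SMI_ONLY -> DOUBLE is a valid transition; the reverse is not.
      MaybeObject* maybe = obj->TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
      if (maybe->IsFailure()) return maybe;
      FixedDoubleArray::cast(obj->elements())->set(index, v);
      return obj;
    }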
@@ -10038,10 +9813,8 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
break;
}
// Fall through.
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
backing_store = FixedArray::cast(backing_store_base);
*capacity = backing_store->length();
for (int i = 0; i < *capacity; ++i) {
@@ -10055,8 +9828,7 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
*used = dictionary->NumberOfElements();
break;
}
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
*capacity = elms->length();
for (int i = 0; i < *capacity; i++) {
@@ -10326,19 +10098,16 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
if (this->IsStringObjectWithCharacterAt(index)) return true;
switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- uint32_t length = IsJSArray() ?
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
}
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -10538,7 +10307,7 @@ int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
int JSObject::NumberOfEnumElements() {
// Fast case for objects with no elements.
- if (!IsJSValue() && HasFastObjectElements()) {
+ if (!IsJSValue() && HasFastElements()) {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -10554,10 +10323,8 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
PropertyAttributes filter) {
int counter = 0;
switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
FixedArray::cast(elements())->length();
@@ -10572,8 +10339,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
FixedDoubleArray::cast(elements())->length();
@@ -11506,9 +11272,10 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
// Convert to fast elements.
Object* obj;
- MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
- FAST_HOLEY_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ { MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
+ FAST_ELEMENTS);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
Map* new_map = Map::cast(obj);
PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
@@ -11519,9 +11286,9 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
FixedArray* fast_elements = FixedArray::cast(new_array);
dict->CopyValuesTo(fast_elements);
- ValidateElements();
- set_map_and_elements(new_map, fast_elements);
+ set_map(new_map);
+ set_elements(fast_elements);
} else if (HasExternalArrayElements()) {
// External arrays cannot have holes or undefined elements.
return Smi::FromInt(ExternalArray::cast(elements())->length());
@@ -11531,7 +11298,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
}
- ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements());
+ ASSERT(HasFastTypeElements() || HasFastDoubleElements());
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
@@ -13078,7 +12845,7 @@ int BreakPointInfo::GetBreakPointCount() {
#endif // ENABLE_DEBUGGER_SUPPORT
-Object* JSDate::GetField(Object* object, Smi* index) {
+MaybeObject* JSDate::GetField(Object* object, Smi* index) {
return JSDate::cast(object)->DoGetField(
static_cast<FieldIndex>(index->value()));
}
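
Note: GetField now returns MaybeObject*, the failure-aware result type used
throughout these hunks: a result is either a real Object or a Failure that
must be propagated rather than dereferenced. The unwrap idiom as it appears
repeatedly above (SomeAllocatingOp is a placeholder):

    Object* obj;
    MaybeObject* maybe_obj = SomeAllocatingOp();
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;  // propagate Failure
    // obj is now safe to use.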
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index e9dfe6ca55..80d1fd47d0 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -30,7 +30,6 @@
#include "allocation.h"
#include "builtins.h"
-#include "elements-kind.h"
#include "list.h"
#include "property-details.h"
#include "smart-array-pointer.h"
@@ -132,6 +131,40 @@
namespace v8 {
namespace internal {
+enum ElementsKind {
+ // The "fast" kind for elements that only contain SMI values. Must be first
+ // to make it possible to efficiently check maps for this kind.
+ FAST_SMI_ONLY_ELEMENTS,
+
+ // The "fast" kind for tagged values. Must be second to make it possible to
+ // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
+ // together at once.
+ FAST_ELEMENTS,
+
+ // The "fast" kind for unwrapped, non-tagged double values.
+ FAST_DOUBLE_ELEMENTS,
+
+ // The "slow" kind.
+ DICTIONARY_ELEMENTS,
+ NON_STRICT_ARGUMENTS_ELEMENTS,
+  // The "fast" kinds for external arrays.
+ EXTERNAL_BYTE_ELEMENTS,
+ EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
+ EXTERNAL_SHORT_ELEMENTS,
+ EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
+ EXTERNAL_INT_ELEMENTS,
+ EXTERNAL_UNSIGNED_INT_ELEMENTS,
+ EXTERNAL_FLOAT_ELEMENTS,
+ EXTERNAL_DOUBLE_ELEMENTS,
+ EXTERNAL_PIXEL_ELEMENTS,
+
+  // Constants derived from ElementsKind.
+ FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
+ LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
+ LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
+};
+
enum CompareMapMode {
REQUIRE_EXACT_MAP,
ALLOW_ELEMENT_TRANSITION_MAPS
@@ -142,6 +175,13 @@ enum KeyedAccessGrowMode {
ALLOW_JSARRAY_GROWTH
};
+const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+
+void PrintElementsKind(FILE* out, ElementsKind kind);
+
+inline bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+ ElementsKind to_kind);
+
// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
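
Note: the order of the ElementsKind enum added above is load-bearing. The two
fast tagged kinds come first so map checks can test "smi-only or fast" with a
single unsigned comparison, and the FIRST_/LAST_ markers make range checks and
kElementsKindCount possible. A sketch of the checks this enables (the helper
names are illustrative):

    inline bool IsFastTypeElementsKind(ElementsKind kind) {
      // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS, by enum order.
      return kind <= FAST_ELEMENTS;
    }
    inline bool IsExternalArrayElementsKind(ElementsKind kind) {
      return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
             kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
    }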
@@ -664,13 +704,12 @@ enum CompareResult {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
-class AccessorPair;
class DictionaryElementsAccessor;
class ElementsAccessor;
-class Failure;
class FixedArrayBase;
class ObjectVisitor;
class StringStream;
+class Failure;
struct ValueInfo : public Malloced {
ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -1470,19 +1509,13 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT inline MaybeObject* ResetElements();
inline ElementsKind GetElementsKind();
inline ElementsAccessor* GetElementsAccessor();
- // Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind.
- inline bool HasFastSmiElements();
- // Returns true if an object has elements of FAST_ELEMENTS ElementsKind.
- inline bool HasFastObjectElements();
- // Returns true if an object has elements of FAST_ELEMENTS or
- // FAST_SMI_ONLY_ELEMENTS.
- inline bool HasFastSmiOrObjectElements();
- // Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
- // ElementsKind.
+ inline bool HasFastSmiOnlyElements();
+ inline bool HasFastElements();
+  // Returns true if an object has either FAST_ELEMENTS or
+  // FAST_SMI_ONLY_ELEMENTS elements. TODO(danno): Rename HasFastTypeElements
+  // to HasFastElements() and HasFastElements to HasFastObjectElements.
+ inline bool HasFastTypeElements();
inline bool HasFastDoubleElements();
- // Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS
- // ElementsKind.
- inline bool HasFastHoleyElements();
inline bool HasNonStrictArgumentsElements();
inline bool HasDictionaryElements();
inline bool HasExternalPixelElements();
@@ -1609,14 +1642,6 @@ class JSObject: public JSReceiver {
Object* getter,
Object* setter,
PropertyAttributes attributes);
- // Try to define a single accessor paying attention to map transitions.
- // Returns a JavaScript null if this was not possible and we have to use the
- // slow case. Note that we can fail due to allocations, too.
- MUST_USE_RESULT MaybeObject* DefineFastAccessor(
- String* name,
- AccessorComponent component,
- Object* accessor,
- PropertyAttributes attributes);
Object* LookupAccessor(String* name, AccessorComponent component);
MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
@@ -1685,7 +1710,7 @@ class JSObject: public JSReceiver {
static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
- inline void ValidateElements();
+ inline void ValidateSmiOnlyElements();
// Makes sure that this object can contain HeapObject as elements.
MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
@@ -1697,7 +1722,6 @@ class JSObject: public JSReceiver {
EnsureElementsMode mode);
MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
FixedArrayBase* elements,
- uint32_t length,
EnsureElementsMode mode);
MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
Arguments* arguments,
@@ -1796,10 +1820,10 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
uint32_t index);
- enum SetFastElementsCapacitySmiMode {
- kAllowSmiElements,
- kForceSmiElements,
- kDontAllowSmiElements
+ enum SetFastElementsCapacityMode {
+ kAllowSmiOnlyElements,
+ kForceSmiOnlyElements,
+ kDontAllowSmiOnlyElements
};
// Replace the elements' backing store with fast elements of the given
@@ -1808,7 +1832,7 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
int capacity,
int length,
- SetFastElementsCapacitySmiMode smi_mode);
+ SetFastElementsCapacityMode set_capacity_mode);
MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
int capacity,
int length);
@@ -2576,9 +2600,6 @@ class DescriptorArray: public FixedArray {
// Is the descriptor array sorted and without duplicates?
bool IsSortedNoDuplicates();
- // Is the descriptor array consistent with the back pointers in targets?
- bool IsConsistentWithBackPointers(Map* current_map);
-
// Are two DescriptorArrays equal?
bool IsEqualTo(DescriptorArray* other);
#endif
@@ -4270,11 +4291,6 @@ class Code: public HeapObject {
inline byte compare_state();
inline void set_compare_state(byte value);
- // [compare_operation]: For kind COMPARE_IC tells what compare operation the
- // stub was generated for.
- inline byte compare_operation();
- inline void set_compare_operation(byte value);
-
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
inline void set_to_boolean_state(byte value);
@@ -4410,7 +4426,6 @@ class Code: public HeapObject {
void CodeVerify();
#endif
void ClearInlineCaches();
- void ClearTypeFeedbackCells(Heap* heap);
  // Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
@@ -4459,8 +4474,6 @@ class Code: public HeapObject {
static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
- static const int kCompareOperationOffset = kCompareStateOffset + 1;
-
static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
@@ -4614,21 +4627,17 @@ class Map: public HeapObject {
}
// Tells whether the instance has fast elements that are only Smis.
- inline bool has_fast_smi_elements() {
- return IsFastSmiElementsKind(elements_kind());
+ inline bool has_fast_smi_only_elements() {
+ return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
}
// Tells whether the instance has fast elements.
- inline bool has_fast_object_elements() {
- return IsFastObjectElementsKind(elements_kind());
- }
-
- inline bool has_fast_smi_or_object_elements() {
- return IsFastSmiOrObjectElementsKind(elements_kind());
+ inline bool has_fast_elements() {
+ return elements_kind() == FAST_ELEMENTS;
}
inline bool has_fast_double_elements() {
- return IsFastDoubleElementsKind(elements_kind());
+ return elements_kind() == FAST_DOUBLE_ELEMENTS;
}
inline bool has_non_strict_arguments_elements() {
@@ -4693,30 +4702,19 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
- // [back pointer]: points back to the parent map from which a transition
- // leads to this map. The field overlaps with prototype transitions and the
- // back pointer will be moved into the prototype transitions array if
- // required.
- inline Object* GetBackPointer();
- inline void SetBackPointer(Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
// [prototype transitions]: cache of prototype transitions.
// Prototype transition is a transition that happens
  // when we change an object's prototype to a new one.
// Cache format:
// 0: finger - index of the first free cell in the cache
- // 1: back pointer that overlaps with prototype transitions field.
- // 2 + 2 * i: prototype
- // 3 + 2 * i: target map
+ // 1 + 2 * i: prototype
+ // 2 + 2 * i: target map
DECL_ACCESSORS(prototype_transitions, FixedArray)
- inline void init_prototype_transitions(Object* undefined);
- inline HeapObject* unchecked_prototype_transitions();
+ inline FixedArray* unchecked_prototype_transitions();
- static const int kProtoTransitionHeaderSize = 2;
+ static const int kProtoTransitionHeaderSize = 1;
static const int kProtoTransitionNumberOfEntriesOffset = 0;
- static const int kProtoTransitionBackPointerOffset = 1;
static const int kProtoTransitionElementsPerEntry = 2;
static const int kProtoTransitionPrototypeOffset = 0;
static const int kProtoTransitionMapOffset = 1;
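
Note: with the back-pointer slot removed, the prototype transition cache's
header shrinks from two cells to one (just the finger), and each entry is a
(prototype, target map) pair. The slot of entry i follows from the constants
above:

    // Addressing entry i in the prototype transition cache.
    int proto_slot = Map::kProtoTransitionHeaderSize +
                     i * Map::kProtoTransitionElementsPerEntry +
                     Map::kProtoTransitionPrototypeOffset;  // header + 2*i
    int map_slot   = Map::kProtoTransitionHeaderSize +
                     i * Map::kProtoTransitionElementsPerEntry +
                     Map::kProtoTransitionMapOffset;        // header + 2*i + 1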
@@ -4788,10 +4786,25 @@ class Map: public HeapObject {
// Removes a code object from the code cache at the given index.
void RemoveFromCodeCache(String* name, Code* code, int index);
- // Set all map transitions from this map to dead maps to null. Also clear
- // back pointers in transition targets so that we do not process this map
- // again while following back pointers.
- void ClearNonLiveTransitions(Heap* heap);
+ // For every transition in this map, makes the transition's
+ // target's prototype pointer point back to this map.
+ // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
+ void CreateBackPointers();
+
+ void CreateOneBackPointer(Object* transition_target);
+
+ // Set all map transitions from this map to dead maps to null.
+ // Also, restore the original prototype on the targets of these
+ // transitions, so that we do not process this map again while
+ // following back pointers.
+ void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
+
+ // Restore a possible back pointer in the prototype field of object.
+ // Return true in that case and false otherwise. Set *keep_entry to
+ // true when a live map transition has been found.
+ bool RestoreOneBackPointer(Object* object,
+ Object* real_prototype,
+ bool* keep_entry);
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
@@ -4826,14 +4839,6 @@ class Map: public HeapObject {
Handle<Map> FindTransitionedMap(MapHandleList* candidates);
Map* FindTransitionedMap(MapList* candidates);
- // Zaps the contents of backing data structures in debug mode. Note that the
- // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
- // holding weak references when incremental marking is used, because it also
- // iterates over objects that are otherwise unreachable.
-#ifdef DEBUG
- void ZapInstanceDescriptors();
- void ZapPrototypeTransitions();
-#endif
// Dispatched behavior.
#ifdef OBJECT_PRINT
@@ -4881,17 +4886,16 @@ class Map: public HeapObject {
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset =
kInstanceDescriptorsOrBitField3Offset + kPointerSize;
- static const int kPrototypeTransitionsOrBackPointerOffset =
+ static const int kPrototypeTransitionsOffset =
kCodeCacheOffset + kPointerSize;
- static const int kPadStart =
- kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
+ static const int kPadStart = kPrototypeTransitionsOffset + kPointerSize;
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
// being continuously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
static const int kPointerFieldsEndOffset =
- kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
+ Map::kPrototypeTransitionsOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -4924,31 +4928,25 @@ class Map: public HeapObject {
// Bit positions for bit field 2
static const int kIsExtensible = 0;
- static const int kStringWrapperSafeForDefaultValueOf = 1;
- static const int kAttachedToSharedFunctionInfo = 2;
+ static const int kFunctionWithPrototype = 1;
+ static const int kStringWrapperSafeForDefaultValueOf = 2;
+ static const int kAttachedToSharedFunctionInfo = 3;
  // No bits can be used after kElementsKindShift; they are all reserved
  // for storing the ElementsKind.
- static const int kElementsKindShift = 3;
- static const int kElementsKindBitCount = 5;
+ static const int kElementsKindShift = 4;
+ static const int kElementsKindBitCount = 4;
// Derived values from bit field 2
static const int kElementsKindMask = (-1 << kElementsKindShift) &
((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
(FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
- static const int8_t kMaximumBitField2FastSmiElementValue =
- static_cast<int8_t>((FAST_SMI_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
- static const int8_t kMaximumBitField2FastHoleyElementValue =
- static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
- static const int8_t kMaximumBitField2FastHoleySmiElementValue =
- static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1) <<
+ static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
+ static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
Map::kElementsKindShift) - 1;
// Bit positions for bit field 3
static const int kIsShared = 0;
- static const int kFunctionWithPrototype = 1;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
@@ -5336,8 +5334,6 @@ class SharedFunctionInfo: public HeapObject {
inline int deopt_counter();
inline void set_deopt_counter(int counter);
- inline int profiler_ticks();
-
// Inline cache age is used to infer whether the function survived a context
// disposal or not. In the former case we reset the opt_count.
inline int ic_age();
@@ -6124,7 +6120,7 @@ class JSDate: public JSObject {
// Returns the date field with the specified index.
// See FieldIndex for the list of date fields.
- static Object* GetField(Object* date, Smi* index);
+ static MaybeObject* GetField(Object* date, Smi* index);
void SetValue(Object* value, bool is_value_nan);
@@ -6873,7 +6869,7 @@ class String: public HeapObject {
inline void Set(int index, uint16_t value);
// Get individual two byte char in the string. Repeated calls
// to this method are not efficient unless the string is flat.
- INLINE(uint16_t Get(int index));
+ inline uint16_t Get(int index);
// Try to flatten the string. Checks first inline to see if it is
// necessary. Does nothing if the string is not a cons string.
@@ -7228,10 +7224,6 @@ class SeqAsciiString: public SeqString {
unsigned* offset,
unsigned chars);
-#ifdef DEBUG
- void SeqAsciiStringVerify();
-#endif
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
};
@@ -8109,18 +8101,6 @@ class AccessorPair: public Struct {
MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
- Object* get(AccessorComponent component) {
- return component == ACCESSOR_GETTER ? getter() : setter();
- }
-
- void set(AccessorComponent component, Object* value) {
- if (component == ACCESSOR_GETTER) {
- set_getter(value);
- } else {
- set_setter(value);
- }
- }
-
// Note: Returns undefined instead in case of a hole.
Object* GetComponent(AccessorComponent component);
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 3a7a973d40..862051956b 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -3767,12 +3767,10 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Handle<FixedArray> object_literals =
isolate()->factory()->NewFixedArray(values->length(), TENURED);
Handle<FixedDoubleArray> double_literals;
- ElementsKind elements_kind = FAST_SMI_ELEMENTS;
+ ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
bool has_only_undefined_values = true;
- bool has_hole_values = false;
// Fill in the literals.
- Heap* heap = isolate()->heap();
bool is_simple = true;
int depth = 1;
for (int i = 0, n = values->length(); i < n; i++) {
@@ -3781,18 +3779,12 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
depth = m_literal->depth() + 1;
}
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsTheHole()) {
- has_hole_values = true;
+ if (boilerplate_value->IsUndefined()) {
object_literals->set_the_hole(i);
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
double_literals->set_the_hole(i);
}
- } else if (boilerplate_value->IsUndefined()) {
is_simple = false;
- object_literals->set(i, Smi::FromInt(0));
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- double_literals->set(i, 0);
- }
} else {
// Examine each literal element, and adjust the ElementsKind if the
// literal element is not of a type that can be stored in the current
@@ -3802,7 +3794,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// ultimately end up in FAST_ELEMENTS.
has_only_undefined_values = false;
object_literals->set(i, *boilerplate_value);
- if (elements_kind == FAST_SMI_ELEMENTS) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
// Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
// FAST_ELEMENTS is required.
if (!boilerplate_value->IsSmi()) {
@@ -3850,7 +3842,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
elements_kind != FAST_DOUBLE_ELEMENTS) {
- object_literals->set_map(heap->fixed_cow_array_map());
+ object_literals->set_map(isolate()->heap()->fixed_cow_array_map());
}
Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
@@ -3862,10 +3854,6 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Handle<FixedArray> literals =
isolate()->factory()->NewFixedArray(2, TENURED);
- if (has_hole_values || !FLAG_packed_arrays) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
-
literals->set(0, Smi::FromInt(elements_kind));
literals->set(1, *element_values);
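
Note: the parser starts the boilerplate at FAST_SMI_ONLY_ELEMENTS and widens
the kind as it scans the literal's values, then stores the final kind next to
the values in a two-slot literals array. A condensed sketch of the widening
scan (boilerplate_values is a placeholder for the locals above):

    ElementsKind kind = FAST_SMI_ONLY_ELEMENTS;
    for (int i = 0; i < n; i++) {
      Handle<Object> v = boilerplate_values[i];
      if (kind == FAST_SMI_ONLY_ELEMENTS && !v->IsSmi()) {
        kind = v->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      } else if (kind == FAST_DOUBLE_ELEMENTS && !v->IsNumber()) {
        kind = FAST_ELEMENTS;
      }
    }
    // literals->set(0, Smi::FromInt(kind));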
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 511759c485..4a6845ef39 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -554,7 +554,6 @@ class FreeBSDMutex : public Mutex {
ASSERT(result == 0);
result = pthread_mutex_init(&mutex_, &attrs);
ASSERT(result == 0);
- USE(result);
}
virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index d942d78a55..66316594c8 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -421,11 +421,7 @@ Socket* POSIXSocket::Accept() const {
return NULL;
}
- int socket;
- do {
- socket = accept(socket_, NULL, NULL);
- } while (socket == -1 && errno == EINTR);
-
+ int socket = accept(socket_, NULL, NULL);
if (socket == -1) {
return NULL;
} else {
@@ -452,9 +448,7 @@ bool POSIXSocket::Connect(const char* host, const char* port) {
}
// Connect.
- do {
- status = connect(socket_, result->ai_addr, result->ai_addrlen);
- } while (status == -1 && errno == EINTR);
+ status = connect(socket_, result->ai_addr, result->ai_addrlen);
freeaddrinfo(result);
return status == 0;
}
@@ -473,29 +467,14 @@ bool POSIXSocket::Shutdown() {
int POSIXSocket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else if (errno != EINTR) {
- return 0;
- }
- }
- return written;
+ int status = send(socket_, data, len, 0);
+ return status;
}
int POSIXSocket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status;
- do {
- status = recv(socket_, data, len, 0);
- } while (status == -1 && errno == EINTR);
- return (status < 0) ? 0 : status;
+ int status = recv(socket_, data, len, 0);
+ return status;
}
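
Note: the removed lines wrapped accept/connect/send/recv in EINTR retry loops
and made Send write the whole buffer; the simplified versions return the raw
status, so short writes and signal interruptions are now the caller's problem.
For reference, the retry idiom the removed code used:

    // Retry a syscall that failed only because a signal arrived.
    int status;
    do {
      status = recv(socket_, data, len, 0);
    } while (status == -1 && errno == EINTR);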
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 2473949dec..9e377a1977 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -1848,26 +1848,14 @@ bool Win32Socket::Shutdown() {
int Win32Socket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else {
- return 0;
- }
- }
- return written;
+ int status = send(socket_, data, len, 0);
+ return status;
}
int Win32Socket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
int status = recv(socket_, data, len, 0);
- return (status == SOCKET_ERROR) ? 0 : status;
+ return status;
}
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index a2ddf7a625..168791a0a4 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -653,7 +653,6 @@ class Socket {
virtual bool Shutdown() = 0;
  // Data Transmission
- // Return 0 on failure.
virtual int Send(const char* data, int len) const = 0;
virtual int Receive(char* data, int len) const = 0;
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 6c64350e8d..284e2dfa36 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -96,31 +96,8 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
HeapEntry* HeapGraphEdge::from() const {
- return &snapshot()->entries()[from_index_];
-}
-
-
-HeapSnapshot* HeapGraphEdge::snapshot() const {
- return to_entry_->snapshot();
-}
-
-
-int HeapEntry::index() const {
- return static_cast<int>(this - &snapshot_->entries().first());
-}
-
-
-int HeapEntry::set_children_index(int index) {
- children_index_ = index;
- int next_index = index + children_count_;
- children_count_ = 0;
- return next_index;
-}
-
-
-HeapGraphEdge** HeapEntry::children_arr() {
- ASSERT(children_index_ >= 0);
- return &snapshot_->children()[children_index_];
+ return const_cast<HeapEntry*>(
+ reinterpret_cast<const HeapEntry*>(this - child_index_) - 1);
}
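
Note: the restored from() relies on the snapshot's packed layout: each
HeapEntry header is immediately followed by its outgoing edges, and
child_index_ is an edge's position in that run. Stepping back child_index_
edge slots reaches edge 0, and one more HeapEntry-sized step reaches the
owner. Sketched (inside a HeapGraphEdge member, as above):

    // [HeapEntry hdr][edge 0][edge 1]...[edge k]...   (one allocation)
    const HeapGraphEdge* first_edge = this - child_index_;     // edge 0
    const HeapEntry* owner =
        reinterpret_cast<const HeapEntry*>(first_edge) - 1;    // the header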
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 0fe7499d70..c91e83bb78 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -931,63 +931,78 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
-HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
- : type_(type),
- from_index_(from),
- to_index_(to),
- name_(name) {
+void HeapGraphEdge::Init(
+ int child_index, Type type, const char* name, HeapEntry* to) {
ASSERT(type == kContextVariable
- || type == kProperty
- || type == kInternal
- || type == kShortcut);
+ || type == kProperty
+ || type == kInternal
+ || type == kShortcut);
+ child_index_ = child_index;
+ type_ = type;
+ name_ = name;
+ to_ = to;
}
-HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
- : type_(type),
- from_index_(from),
- to_index_(to),
- index_(index) {
+void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
ASSERT(type == kElement || type == kHidden || type == kWeak);
+ child_index_ = child_index;
+ type_ = type;
+ index_ = index;
+ to_ = to;
}
-void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
- to_entry_ = &snapshot->entries()[to_index_];
+void HeapGraphEdge::Init(int child_index, int index, HeapEntry* to) {
+ Init(child_index, kElement, index, to);
}
-const int HeapEntry::kNoEntry = -1;
-
-HeapEntry::HeapEntry(HeapSnapshot* snapshot,
+void HeapEntry::Init(HeapSnapshot* snapshot,
Type type,
const char* name,
SnapshotObjectId id,
- int self_size)
- : type_(type),
- children_count_(0),
- children_index_(-1),
- self_size_(self_size),
- id_(id),
- snapshot_(snapshot),
- name_(name) { }
+ int self_size,
+ int children_count,
+ int retainers_count) {
+ snapshot_ = snapshot;
+ type_ = type;
+ painted_ = false;
+ user_reachable_ = false;
+ name_ = name;
+ self_size_ = self_size;
+ retained_size_ = 0;
+ entry_index_ = -1;
+ children_count_ = children_count;
+ retainers_count_ = retainers_count;
+ dominator_ = NULL;
+ id_ = id;
+}
void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
+ int child_index,
const char* name,
- HeapEntry* entry) {
- HeapGraphEdge edge(type, name, this->index(), entry->index());
- snapshot_->edges().Add(edge);
- ++children_count_;
+ HeapEntry* entry,
+ int retainer_index) {
+ children()[child_index].Init(child_index, type, name, entry);
+ entry->retainers()[retainer_index] = children_arr() + child_index;
}
void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
+ int child_index,
int index,
- HeapEntry* entry) {
- HeapGraphEdge edge(type, index, this->index(), entry->index());
- snapshot_->edges().Add(edge);
- ++children_count_;
+ HeapEntry* entry,
+ int retainer_index) {
+ children()[child_index].Init(child_index, type, index, entry);
+ entry->retainers()[retainer_index] = children_arr() + child_index;
+}
+
+
+void HeapEntry::SetUnidirElementReference(
+ int child_index, int index, HeapEntry* entry) {
+ children()[child_index].Init(child_index, index, entry);
}
@@ -998,9 +1013,9 @@ Handle<HeapObject> HeapEntry::GetHeapObject() {
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
- STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d @%6u %*c %s%s: ",
- self_size(), id(), indent, ' ', prefix, edge_name);
+ OS::Print("%6d %7d @%6llu %*c %s%s: ",
+ self_size(), retained_size(), id(),
+ indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -1016,9 +1031,9 @@ void HeapEntry::Print(
OS::Print("\"\n");
}
if (--max_depth == 0) return;
- Vector<HeapGraphEdge*> ch = children();
+ Vector<HeapGraphEdge> ch = children();
for (int i = 0; i < ch.length(); ++i) {
- HeapGraphEdge& edge = *ch[i];
+ HeapGraphEdge& edge = ch[i];
const char* edge_prefix = "";
EmbeddedVector<char, 64> index;
const char* edge_name = index.start();
@@ -1074,6 +1089,15 @@ const char* HeapEntry::TypeAsString() {
}
+size_t HeapEntry::EntriesSize(int entries_count,
+ int children_count,
+ int retainers_count) {
+ return sizeof(HeapEntry) * entries_count // NOLINT
+ + sizeof(HeapGraphEdge) * children_count // NOLINT
+ + sizeof(HeapGraphEdge*) * retainers_count; // NOLINT
+}
+
+
// It is very important to keep objects that form a heap snapshot
// as small as possible.
namespace { // Avoid littering the global namespace.
@@ -1082,13 +1106,13 @@ template <size_t ptr_size> struct SnapshotSizeConstants;
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 24;
+ static const int kExpectedHeapEntrySize = 36;
static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
};
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapEntrySize = 48;
static const uint64_t kMaxSerializableSnapshotRawSize =
static_cast<uint64_t>(6000) * MB;
};
@@ -1103,9 +1127,11 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
type_(type),
title_(title),
uid_(uid),
- root_index_(HeapEntry::kNoEntry),
- gc_roots_index_(HeapEntry::kNoEntry),
- natives_root_index_(HeapEntry::kNoEntry),
+ root_entry_(NULL),
+ gc_roots_entry_(NULL),
+ natives_root_entry_(NULL),
+ raw_entries_(NULL),
+ number_of_edges_(0),
max_snapshot_js_object_id_(0) {
STATIC_CHECK(
sizeof(HeapGraphEdge) ==
@@ -1114,11 +1140,16 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
sizeof(HeapEntry) ==
SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
- gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
+ gc_subroot_entries_[i] = NULL;
}
}
+HeapSnapshot::~HeapSnapshot() {
+ DeleteArray(raw_entries_);
+}
+
+
void HeapSnapshot::Delete() {
collection_->RemoveSnapshot(this);
delete this;
@@ -1130,67 +1161,97 @@ void HeapSnapshot::RememberLastJSObjectId() {
}
-HeapEntry* HeapSnapshot::AddRootEntry() {
- ASSERT(root_index_ == HeapEntry::kNoEntry);
+void HeapSnapshot::AllocateEntries(int entries_count,
+ int children_count,
+ int retainers_count) {
+ ASSERT(raw_entries_ == NULL);
+ number_of_edges_ = children_count;
+ raw_entries_size_ =
+ HeapEntry::EntriesSize(entries_count, children_count, retainers_count);
+ raw_entries_ = NewArray<char>(raw_entries_size_);
+}
+
+
+static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
+ (*entry_ptr)->clear_paint();
+}
+
+
+void HeapSnapshot::ClearPaint() {
+ entries_.Iterate(HeapEntryClearPaint);
+}
+
+
+HeapEntry* HeapSnapshot::AddRootEntry(int children_count) {
+ ASSERT(root_entry_ == NULL);
ASSERT(entries_.is_empty()); // Root entry must be the first one.
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
- "",
- HeapObjectsMap::kInternalRootObjectId,
- 0);
- root_index_ = entry->index();
- ASSERT(root_index_ == 0);
- return entry;
+ return (root_entry_ = AddEntry(HeapEntry::kObject,
+ "",
+ HeapObjectsMap::kInternalRootObjectId,
+ 0,
+ children_count,
+ 0));
}
-HeapEntry* HeapSnapshot::AddGcRootsEntry() {
- ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
- "(GC roots)",
- HeapObjectsMap::kGcRootsObjectId,
- 0);
- gc_roots_index_ = entry->index();
- return entry;
+HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
+ int retainers_count) {
+ ASSERT(gc_roots_entry_ == NULL);
+ return (gc_roots_entry_ = AddEntry(HeapEntry::kObject,
+ "(GC roots)",
+ HeapObjectsMap::kGcRootsObjectId,
+ 0,
+ children_count,
+ retainers_count));
}
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
- ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
+ int children_count,
+ int retainers_count) {
+ ASSERT(gc_subroot_entries_[tag] == NULL);
ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
- HeapEntry* entry = AddEntry(
+ return (gc_subroot_entries_[tag] = AddEntry(
HeapEntry::kObject,
VisitorSynchronization::kTagNames[tag],
HeapObjectsMap::GetNthGcSubrootId(tag),
- 0);
- gc_subroot_indexes_[tag] = entry->index();
- return entry;
+ 0,
+ children_count,
+ retainers_count));
}
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size) {
- HeapEntry entry(this, type, name, id, size);
- entries_.Add(entry);
- return &entries_.last();
+ int size,
+ int children_count,
+ int retainers_count) {
+ HeapEntry* entry = GetNextEntryToInit();
+ entry->Init(this, type, name, id, size, children_count, retainers_count);
+ return entry;
}
-void HeapSnapshot::FillChildren() {
- ASSERT(children().is_empty());
- children().Allocate(edges().length());
- int children_index = 0;
- for (int i = 0; i < entries().length(); ++i) {
- HeapEntry* entry = &entries()[i];
- children_index = entry->set_children_index(children_index);
+void HeapSnapshot::SetDominatorsToSelf() {
+ for (int i = 0; i < entries_.length(); ++i) {
+ HeapEntry* entry = entries_[i];
+ if (entry->dominator() == NULL) entry->set_dominator(entry);
}
- ASSERT(edges().length() == children_index);
- for (int i = 0; i < edges().length(); ++i) {
- HeapGraphEdge* edge = &edges()[i];
- edge->ReplaceToIndexWithEntry(this);
- edge->from()->add_child(edge);
+}
+
+
+HeapEntry* HeapSnapshot::GetNextEntryToInit() {
+ if (entries_.length() > 0) {
+ HeapEntry* last_entry = entries_.last();
+ entries_.Add(reinterpret_cast<HeapEntry*>(
+ reinterpret_cast<char*>(last_entry) + last_entry->EntrySize()));
+ } else {
+ entries_.Add(reinterpret_cast<HeapEntry*>(raw_entries_));
}
+ ASSERT(reinterpret_cast<char*>(entries_.last()) <
+ (raw_entries_ + raw_entries_size_));
+ return entries_.last();
}
@@ -1226,10 +1287,7 @@ static int SortByIds(const T* entry1_ptr,
List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
if (sorted_entries_.is_empty()) {
- sorted_entries_.Allocate(entries_.length());
- for (int i = 0; i < entries_.length(); ++i) {
- sorted_entries_[i] = &entries_[i];
- }
+ sorted_entries_.AddAll(entries_);
sorted_entries_.Sort(SortByIds);
}
return &sorted_entries_;
@@ -1241,21 +1299,6 @@ void HeapSnapshot::Print(int max_depth) {
}
-template<typename T, class P>
-static size_t GetMemoryUsedByList(const List<T, P>& list) {
- return list.capacity() * sizeof(T);
-}
-
-
-size_t HeapSnapshot::RawSnapshotSize() const {
- return
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(edges_) +
- GetMemoryUsedByList(children_) +
- GetMemoryUsedByList(sorted_entries_);
-}
-
-
// We split IDs on evens for embedder objects (see
// HeapObjectsMap::GenerateId) and odds for native objects.
const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
@@ -1524,22 +1567,99 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
}
+HeapEntry* const HeapEntriesMap::kHeapEntryPlaceholder =
+ reinterpret_cast<HeapEntry*>(1);
+
HeapEntriesMap::HeapEntriesMap()
- : entries_(HeapThingsMatch) {
+ : entries_(HeapThingsMatch),
+ entries_count_(0),
+ total_children_count_(0),
+ total_retainers_count_(0) {
+}
+
+
+HeapEntriesMap::~HeapEntriesMap() {
+ for (HashMap::Entry* p = entries_.Start(); p != NULL; p = entries_.Next(p)) {
+ delete reinterpret_cast<EntryInfo*>(p->value);
+ }
+}
+
+
+void HeapEntriesMap::AllocateHeapEntryForMapEntry(HashMap::Entry* map_entry) {
+ EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(map_entry->value);
+ entry_info->entry = entry_info->allocator->AllocateEntry(
+ map_entry->key,
+ entry_info->children_count,
+ entry_info->retainers_count);
+ ASSERT(entry_info->entry != NULL);
+ ASSERT(entry_info->entry != kHeapEntryPlaceholder);
+ entry_info->children_count = 0;
+ entry_info->retainers_count = 0;
+}
+
+
+void HeapEntriesMap::AllocateEntries(HeapThing root_object) {
+ HashMap::Entry* root_entry =
+ entries_.Lookup(root_object, Hash(root_object), false);
+ ASSERT(root_entry != NULL);
+ // Make sure root entry is allocated first.
+ AllocateHeapEntryForMapEntry(root_entry);
+ void* root_entry_value = root_entry->value;
+  // Remove the root object from the map while iterating through other entries.
+ entries_.Remove(root_object, Hash(root_object));
+ root_entry = NULL;
+
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ AllocateHeapEntryForMapEntry(p);
+ }
+
+ // Insert root entry back.
+ root_entry = entries_.Lookup(root_object, Hash(root_object), true);
+ root_entry->value = root_entry_value;
}
-int HeapEntriesMap::Map(HeapThing thing) {
+HeapEntry* HeapEntriesMap::Map(HeapThing thing) {
HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
- if (cache_entry == NULL) return HeapEntry::kNoEntry;
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+ if (cache_entry != NULL) {
+ EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(cache_entry->value);
+ return entry_info->entry;
+ } else {
+ return NULL;
+ }
}
-void HeapEntriesMap::Pair(HeapThing thing, int entry) {
+void HeapEntriesMap::Pair(
+ HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry) {
HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
ASSERT(cache_entry->value == NULL);
- cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
+ cache_entry->value = new EntryInfo(entry, allocator);
+ ++entries_count_;
+}
+
+
+void HeapEntriesMap::CountReference(HeapThing from, HeapThing to,
+ int* prev_children_count,
+ int* prev_retainers_count) {
+ HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false);
+ HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
+ ASSERT(from_cache_entry != NULL);
+ ASSERT(to_cache_entry != NULL);
+ EntryInfo* from_entry_info =
+ reinterpret_cast<EntryInfo*>(from_cache_entry->value);
+ EntryInfo* to_entry_info =
+ reinterpret_cast<EntryInfo*>(to_cache_entry->value);
+ if (prev_children_count)
+ *prev_children_count = from_entry_info->children_count;
+ if (prev_retainers_count)
+ *prev_retainers_count = to_entry_info->retainers_count;
+ ++from_entry_info->children_count;
+ ++to_entry_info->retainers_count;
+ ++total_children_count_;
+ ++total_retainers_count_;
}
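
CountReference does double duty: in the counting pass only the totals matter, while in the fill pass the previous per-node counts returned through the out parameters serve as the next free slot in each node's preallocated arrays. A minimal sketch of that count-then-fill idiom, independent of the V8 types:

#include <utility>
#include <vector>

// Count-then-fill: pass 1 tallies per-node degrees, the arrays are
// sized from the tallies, and pass 2 reuses the counters as cursors.
struct Node {
  Node() : children_count(0) {}
  int children_count;         // Pass 1 result; fill cursor in pass 2.
  std::vector<int> children;  // Sized between the passes.
};

void BuildChildren(std::vector<Node>& nodes,
                   const std::vector<std::pair<int, int> >& edges) {
  for (size_t i = 0; i < edges.size(); ++i)      // Pass 1: count.
    ++nodes[edges[i].first].children_count;
  for (size_t i = 0; i < nodes.size(); ++i) {    // Size, then reset.
    nodes[i].children.resize(nodes[i].children_count);
    nodes[i].children_count = 0;
  }
  for (size_t i = 0; i < edges.size(); ++i) {    // Pass 2: fill.
    Node& from = nodes[edges[i].first];
    from.children[from.children_count++] = edges[i].second;
  }
}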
@@ -1556,14 +1676,20 @@ void HeapObjectsSet::Clear() {
bool HeapObjectsSet::Contains(Object* obj) {
if (!obj->IsHeapObject()) return false;
HeapObject* object = HeapObject::cast(obj);
- return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL;
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
+ return cache_entry != NULL;
}
void HeapObjectsSet::Insert(Object* obj) {
if (!obj->IsHeapObject()) return;
HeapObject* object = HeapObject::cast(obj);
- entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+ if (cache_entry->value == NULL) {
+ cache_entry->value = HeapEntriesMap::kHeapEntryPlaceholder;
+ }
}
@@ -1571,9 +1697,12 @@ const char* HeapObjectsSet::GetTag(Object* obj) {
HeapObject* object = HeapObject::cast(obj);
HashMap::Entry* cache_entry =
entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
- return cache_entry != NULL
- ? reinterpret_cast<const char*>(cache_entry->value)
- : NULL;
+ if (cache_entry != NULL
+ && cache_entry->value != HeapEntriesMap::kHeapEntryPlaceholder) {
+ return reinterpret_cast<const char*>(cache_entry->value);
+ } else {
+ return NULL;
+ }
}
@@ -1615,83 +1744,129 @@ V8HeapExplorer::~V8HeapExplorer() {
}
-HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
- return AddEntry(reinterpret_cast<HeapObject*>(ptr));
+HeapEntry* V8HeapExplorer::AllocateEntry(
+ HeapThing ptr, int children_count, int retainers_count) {
+ return AddEntry(
+ reinterpret_cast<HeapObject*>(ptr), children_count, retainers_count);
}
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
+ int children_count,
+ int retainers_count) {
if (object == kInternalRootObject) {
- snapshot_->AddRootEntry();
- return snapshot_->root();
+ ASSERT(retainers_count == 0);
+ return snapshot_->AddRootEntry(children_count);
} else if (object == kGcRootsObject) {
- HeapEntry* entry = snapshot_->AddGcRootsEntry();
- return entry;
+ return snapshot_->AddGcRootsEntry(children_count, retainers_count);
} else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
- HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
- return entry;
+ return snapshot_->AddGcSubrootEntry(
+ GetGcSubrootOrder(object),
+ children_count,
+ retainers_count);
} else if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
const char* name = shared->bound() ? "native_bind" :
collection_->names()->GetName(String::cast(shared->name()));
- return AddEntry(object, HeapEntry::kClosure, name);
+ return AddEntry(object,
+ HeapEntry::kClosure,
+ name,
+ children_count,
+ retainers_count);
} else if (object->IsJSRegExp()) {
JSRegExp* re = JSRegExp::cast(object);
return AddEntry(object,
HeapEntry::kRegExp,
- collection_->names()->GetName(re->Pattern()));
+ collection_->names()->GetName(re->Pattern()),
+ children_count,
+ retainers_count);
} else if (object->IsJSObject()) {
- const char* name = collection_->names()->GetName(
- GetConstructorName(JSObject::cast(object)));
- if (object->IsJSGlobalObject()) {
- const char* tag = objects_tags_.GetTag(object);
- if (tag != NULL) {
- name = collection_->names()->GetFormatted("%s / %s", name, tag);
- }
- }
- return AddEntry(object, HeapEntry::kObject, name);
+ return AddEntry(object,
+ HeapEntry::kObject,
+ "",
+ children_count,
+ retainers_count);
} else if (object->IsString()) {
return AddEntry(object,
HeapEntry::kString,
- collection_->names()->GetName(String::cast(object)));
+ collection_->names()->GetName(String::cast(object)),
+ children_count,
+ retainers_count);
} else if (object->IsCode()) {
- return AddEntry(object, HeapEntry::kCode, "");
+ return AddEntry(object,
+ HeapEntry::kCode,
+ "",
+ children_count,
+ retainers_count);
} else if (object->IsSharedFunctionInfo()) {
- String* name = String::cast(SharedFunctionInfo::cast(object)->name());
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
return AddEntry(object,
HeapEntry::kCode,
- collection_->names()->GetName(name));
+ collection_->names()->GetName(String::cast(shared->name())),
+ children_count,
+ retainers_count);
} else if (object->IsScript()) {
- Object* name = Script::cast(object)->name();
+ Script* script = Script::cast(object);
return AddEntry(object,
HeapEntry::kCode,
- name->IsString()
- ? collection_->names()->GetName(String::cast(name))
- : "");
+ script->name()->IsString() ?
+ collection_->names()->GetName(
+ String::cast(script->name()))
+ : "",
+ children_count,
+ retainers_count);
} else if (object->IsGlobalContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / GlobalContext");
+ return AddEntry(object,
+ HeapEntry::kHidden,
+ "system / GlobalContext",
+ children_count,
+ retainers_count);
} else if (object->IsContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / Context");
+ return AddEntry(object,
+ HeapEntry::kHidden,
+ "system / Context",
+ children_count,
+ retainers_count);
} else if (object->IsFixedArray() ||
object->IsFixedDoubleArray() ||
object->IsByteArray() ||
object->IsExternalArray()) {
- return AddEntry(object, HeapEntry::kArray, "");
+ const char* tag = objects_tags_.GetTag(object);
+ return AddEntry(object,
+ HeapEntry::kArray,
+ tag != NULL ? tag : "",
+ children_count,
+ retainers_count);
} else if (object->IsHeapNumber()) {
- return AddEntry(object, HeapEntry::kHeapNumber, "number");
+ return AddEntry(object,
+ HeapEntry::kHeapNumber,
+ "number",
+ children_count,
+ retainers_count);
}
- return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
+ return AddEntry(object,
+ HeapEntry::kHidden,
+ GetSystemEntryName(object),
+ children_count,
+ retainers_count);
}
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
HeapEntry::Type type,
- const char* name) {
+ const char* name,
+ int children_count,
+ int retainers_count) {
int object_size = object->Size();
SnapshotObjectId object_id =
collection_->GetObjectId(object->address(), object_size);
- return snapshot_->AddEntry(type, name, object_id, object_size);
+ return snapshot_->AddEntry(type,
+ name,
+ object_id,
+ object_size,
+ children_count,
+ retainers_count);
}
@@ -1760,10 +1935,10 @@ class IndexedReferencesExtractor : public ObjectVisitor {
public:
IndexedReferencesExtractor(V8HeapExplorer* generator,
HeapObject* parent_obj,
- int parent)
+ HeapEntry* parent_entry)
: generator_(generator),
parent_obj_(parent_obj),
- parent_(parent),
+ parent_(parent_entry),
next_index_(1) {
}
void VisitPointers(Object** start, Object** end) {
@@ -1792,15 +1967,14 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
V8HeapExplorer* generator_;
HeapObject* parent_obj_;
- int parent_;
+ HeapEntry* parent_;
int next_index_;
};
void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
- HeapEntry* heap_entry = GetEntry(obj);
- if (heap_entry == NULL) return; // No interest in this object.
- int entry = heap_entry->index();
+ HeapEntry* entry = GetEntry(obj);
+ if (entry == NULL) return; // No interest in this object.
bool extract_indexed_refs = true;
if (obj->IsJSGlobalProxy()) {
@@ -1852,7 +2026,7 @@ void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
void V8HeapExplorer::ExtractJSObjectReferences(
- int entry, JSObject* js_obj) {
+ HeapEntry* entry, JSObject* js_obj) {
HeapObject* obj = js_obj;
ExtractClosureReferences(js_obj, entry);
ExtractPropertyReferences(js_obj, entry);
@@ -1921,7 +2095,7 @@ void V8HeapExplorer::ExtractJSObjectReferences(
}
-void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
+void V8HeapExplorer::ExtractStringReferences(HeapEntry* entry, String* string) {
if (string->IsConsString()) {
ConsString* cs = ConsString::cast(string);
SetInternalReference(cs, entry, "first", cs->first());
@@ -1933,7 +2107,8 @@ void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
}
-void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
+void V8HeapExplorer::ExtractContextReferences(
+ HeapEntry* entry, Context* context) {
#define EXTRACT_CONTEXT_FIELD(index, type, name) \
SetInternalReference(context, entry, #name, context->get(Context::index), \
FixedArray::OffsetOfElementAt(Context::index));
@@ -1959,7 +2134,7 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
-void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
+void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
SetInternalReference(map, entry,
"prototype", map->prototype(), Map::kPrototypeOffset);
SetInternalReference(map, entry,
@@ -1971,16 +2146,10 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
"descriptors", map->instance_descriptors(),
Map::kInstanceDescriptorsOrBitField3Offset);
}
- if (map->unchecked_prototype_transitions()->IsFixedArray()) {
- TagObject(map->prototype_transitions(), "(prototype transitions)");
- SetInternalReference(map, entry,
- "prototype_transitions", map->prototype_transitions(),
- Map::kPrototypeTransitionsOrBackPointerOffset);
- } else {
- SetInternalReference(map, entry,
- "back_pointer", map->GetBackPointer(),
- Map::kPrototypeTransitionsOrBackPointerOffset);
- }
+ TagObject(map->prototype_transitions(), "(prototype transitions)");
+ SetInternalReference(map, entry,
+ "prototype_transitions", map->prototype_transitions(),
+ Map::kPrototypeTransitionsOffset);
SetInternalReference(map, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
@@ -1988,7 +2157,7 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
- int entry, SharedFunctionInfo* shared) {
+ HeapEntry* entry, SharedFunctionInfo* shared) {
HeapObject* obj = shared;
SetInternalReference(obj, entry,
"name", shared->name(),
@@ -2030,7 +2199,7 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
}
-void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
+void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script* script) {
HeapObject* obj = script;
SetInternalReference(obj, entry,
"source", script->source(),
@@ -2052,7 +2221,7 @@ void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
void V8HeapExplorer::ExtractCodeCacheReferences(
- int entry, CodeCache* code_cache) {
+ HeapEntry* entry, CodeCache* code_cache) {
TagObject(code_cache->default_cache(), "(default code cache)");
SetInternalReference(code_cache, entry,
"default_cache", code_cache->default_cache(),
@@ -2064,7 +2233,7 @@ void V8HeapExplorer::ExtractCodeCacheReferences(
}
-void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
+void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code* code) {
TagObject(code->relocation_info(), "(code relocation info)");
SetInternalReference(code, entry,
"relocation_info", code->relocation_info(),
@@ -2086,12 +2255,13 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
- int entry, JSGlobalPropertyCell* cell) {
+ HeapEntry* entry, JSGlobalPropertyCell* cell) {
SetInternalReference(cell, entry, "value", cell->value());
}
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
+void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
+ HeapEntry* entry) {
if (!js_obj->IsJSFunction()) return;
JSFunction* func = JSFunction::cast(js_obj);
@@ -2133,7 +2303,8 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
}
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
+void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
+ HeapEntry* entry) {
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -2206,8 +2377,9 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
}
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
- if (js_obj->HasFastObjectElements()) {
+void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
+ HeapEntry* entry) {
+ if (js_obj->HasFastElements()) {
FixedArray* elements = FixedArray::cast(js_obj->elements());
int length = js_obj->IsJSArray() ?
Smi::cast(JSArray::cast(js_obj)->length())->value() :
@@ -2232,7 +2404,8 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
}
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
+void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
+ HeapEntry* entry) {
int length = js_obj->GetInternalFieldCount();
for (int i = 0; i < length; ++i) {
Object* o = js_obj->GetInternalField(i);
@@ -2358,7 +2531,6 @@ bool V8HeapExplorer::IterateAndExtractReferences(
filler_ = NULL;
return false;
}
-
SetRootGcRootsReference();
RootsReferencesExtractor extractor;
heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
@@ -2366,7 +2538,33 @@ bool V8HeapExplorer::IterateAndExtractReferences(
heap_->IterateRoots(&extractor, VISIT_ALL);
extractor.FillReferences(this);
filler_ = NULL;
- return progress_->ProgressReport(true);
+ return progress_->ProgressReport(false);
+}
+
+
+bool V8HeapExplorer::IterateAndSetObjectNames(SnapshotFillerInterface* filler) {
+ HeapIterator iterator(HeapIterator::kFilterUnreachable);
+ filler_ = filler;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ SetObjectName(obj);
+ }
+ return true;
+}
+
+
+void V8HeapExplorer::SetObjectName(HeapObject* object) {
+ if (!object->IsJSObject() || object->IsJSRegExp() || object->IsJSFunction()) {
+ return;
+ }
+ const char* name = collection_->names()->GetName(
+ GetConstructorName(JSObject::cast(object)));
+ if (object->IsJSGlobalObject()) {
+ const char* tag = objects_tags_.GetTag(object);
+ if (tag != NULL) {
+ name = collection_->names()->GetFormatted("%s / %s", name, tag);
+ }
+ }
+ GetEntry(object)->set_name(name);
}
@@ -2388,49 +2586,55 @@ bool V8HeapExplorer::IsEssentialObject(Object* object) {
void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
String* reference_name,
Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
+ parent_obj,
parent_entry,
collection_->names()->GetName(reference_name),
+ child_obj,
child_entry);
}
}
void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
const char* reference_name,
Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kShortcut,
+ parent_obj,
parent_entry,
reference_name,
+ child_obj,
child_entry);
}
}
void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
int index,
Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetIndexedReference(HeapGraphEdge::kElement,
+ parent_obj,
parent_entry,
index,
+ child_obj,
child_entry);
}
}
void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
const char* reference_name,
Object* child_obj,
int field_offset) {
@@ -2438,16 +2642,16 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
if (child_entry == NULL) return;
if (IsEssentialObject(child_obj)) {
filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
+ parent_obj, parent_entry,
reference_name,
- child_entry);
+ child_obj, child_entry);
}
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
int index,
Object* child_obj,
int field_offset) {
@@ -2455,38 +2659,42 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
if (child_entry == NULL) return;
if (IsEssentialObject(child_obj)) {
filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
+ parent_obj, parent_entry,
collection_->names()->GetName(index),
- child_entry);
+ child_obj, child_entry);
}
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
int index,
Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL && IsEssentialObject(child_obj)) {
filler_->SetIndexedReference(HeapGraphEdge::kHidden,
+ parent_obj,
parent_entry,
index,
+ child_obj,
child_entry);
}
}
void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
int index,
Object* child_obj,
int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetIndexedReference(HeapGraphEdge::kWeak,
+ parent_obj,
parent_entry,
index,
+ child_obj,
child_entry);
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
@@ -2494,7 +2702,7 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
String* reference_name,
Object* child_obj,
const char* name_format_string,
@@ -2511,8 +2719,10 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
collection_->names()->GetName(reference_name);
filler_->SetNamedReference(type,
+ parent_obj,
parent_entry,
name,
+ child_obj,
child_entry);
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
@@ -2520,14 +2730,16 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
- int parent_entry,
+ HeapEntry* parent_entry,
String* reference_name,
Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kShortcut,
+ parent_obj,
parent_entry,
collection_->names()->GetName(reference_name),
+ child_obj,
child_entry);
}
}
@@ -2536,8 +2748,8 @@ void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
void V8HeapExplorer::SetRootGcRootsReference() {
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- snapshot_->gc_roots());
+ kInternalRootObject, snapshot_->root(),
+ kGcRootsObject, snapshot_->gc_roots());
}
@@ -2546,16 +2758,16 @@ void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
ASSERT(child_entry != NULL);
filler_->SetNamedAutoIndexReference(
HeapGraphEdge::kShortcut,
- snapshot_->root()->index(),
- child_entry);
+ kInternalRootObject, snapshot_->root(),
+ child_obj, child_entry);
}
void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
- snapshot_->gc_roots()->index(),
- snapshot_->gc_subroot(tag));
+ kGcRootsObject, snapshot_->gc_roots(),
+ GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
}
@@ -2567,14 +2779,14 @@ void V8HeapExplorer::SetGcSubrootReference(
if (name != NULL) {
filler_->SetNamedReference(
HeapGraphEdge::kInternal,
- snapshot_->gc_subroot(tag)->index(),
+ GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
name,
- child_entry);
+ child_obj, child_entry);
} else {
filler_->SetIndexedAutoIndexReference(
is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
+ GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
+ child_obj, child_entry);
}
}
}
@@ -2601,10 +2813,7 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
if (IsEssentialObject(obj)) {
- HeapEntry* entry = GetEntry(obj);
- if (entry->name()[0] == '\0') {
- entry->set_name(tag);
- }
+ objects_tags_.SetTag(obj, tag);
}
}
@@ -2694,7 +2903,8 @@ class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
collection_(snapshot_->collection()),
entries_type_(entries_type) {
}
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ virtual HeapEntry* AllocateEntry(
+ HeapThing ptr, int children_count, int retainers_count);
private:
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
@@ -2702,19 +2912,23 @@ class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
};
-HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
+HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(
+ HeapThing ptr, int children_count, int retainers_count) {
v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
intptr_t elements = info->GetElementCount();
intptr_t size = info->GetSizeInBytes();
- const char* name = elements != -1
- ? collection_->names()->GetFormatted(
- "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : collection_->names()->GetCopy(info->GetLabel());
return snapshot_->AddEntry(
entries_type_,
- name,
+ elements != -1 ?
+ collection_->names()->GetFormatted(
+ "%s / %" V8_PTR_PREFIX "d entries",
+ info->GetLabel(),
+ info->GetElementCount()) :
+ collection_->names()->GetCopy(info->GetLabel()),
HeapObjectsMap::GenerateId(info),
- size != -1 ? static_cast<int>(size) : 0);
+ size != -1 ? static_cast<int>(size) : 0,
+ children_count,
+ retainers_count);
}
@@ -2795,9 +3009,9 @@ void NativeObjectsExplorer::FillImplicitReferences() {
for (int i = 0; i < groups->length(); ++i) {
ImplicitRefGroup* group = groups->at(i);
HeapObject* parent = *group->parent_;
- int parent_entry =
- filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
- ASSERT(parent_entry != HeapEntry::kNoEntry);
+ HeapEntry* parent_entry =
+ filler_->FindOrAddEntry(parent, native_entries_allocator_);
+ ASSERT(parent_entry != NULL);
Object*** children = group->children_;
for (size_t j = 0; j < group->length_; ++j) {
Object* child = *children[j];
@@ -2805,9 +3019,9 @@ void NativeObjectsExplorer::FillImplicitReferences() {
filler_->FindOrAddEntry(child, native_entries_allocator_);
filler_->SetNamedReference(
HeapGraphEdge::kInternal,
- parent_entry,
+ parent, parent_entry,
"native",
- child_entry);
+ child, child_entry);
}
}
}
@@ -2885,9 +3099,8 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
HEAP->HashSeed());
HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
hash, true);
- if (entry->value == NULL) {
+ if (entry->value == NULL)
entry->value = new NativeGroupRetainedObjectInfo(label);
- }
return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
}
@@ -2903,8 +3116,8 @@ void NativeObjectsExplorer::SetNativeRootReference(
filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
filler_->SetNamedAutoIndexReference(
HeapGraphEdge::kInternal,
- group_entry->index(),
- child_entry);
+ group_info, group_entry,
+ info, child_entry);
}
@@ -2916,12 +3129,12 @@ void NativeObjectsExplorer::SetWrapperNativeReferences(
filler_->FindOrAddEntry(info, native_entries_allocator_);
ASSERT(info_entry != NULL);
filler_->SetNamedReference(HeapGraphEdge::kInternal,
- wrapper_entry->index(),
+ wrapper, wrapper_entry,
"native",
- info_entry);
+ info, info_entry);
filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- info_entry->index(),
- wrapper_entry);
+ info, info_entry,
+ wrapper, wrapper_entry);
}
@@ -2936,8 +3149,8 @@ void NativeObjectsExplorer::SetRootNativeRootsReference() {
ASSERT(group_entry != NULL);
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- group_entry);
+ V8HeapExplorer::kInternalRootObject, snapshot_->root(),
+ group_info, group_entry);
}
}
@@ -2952,6 +3165,56 @@ void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
}
+class SnapshotCounter : public SnapshotFillerInterface {
+ public:
+ explicit SnapshotCounter(HeapEntriesMap* entries) : entries_(entries) { }
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ entries_->Pair(ptr, allocator, HeapEntriesMap::kHeapEntryPlaceholder);
+ return HeapEntriesMap::kHeapEntryPlaceholder;
+ }
+ HeapEntry* FindEntry(HeapThing ptr) {
+ return entries_->Map(ptr);
+ }
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != NULL ? entry : AddEntry(ptr, allocator);
+ }
+ void SetIndexedReference(HeapGraphEdge::Type,
+ HeapThing parent_ptr,
+ HeapEntry*,
+ int,
+ HeapThing child_ptr,
+ HeapEntry*) {
+ entries_->CountReference(parent_ptr, child_ptr);
+ }
+ void SetIndexedAutoIndexReference(HeapGraphEdge::Type,
+ HeapThing parent_ptr,
+ HeapEntry*,
+ HeapThing child_ptr,
+ HeapEntry*) {
+ entries_->CountReference(parent_ptr, child_ptr);
+ }
+ void SetNamedReference(HeapGraphEdge::Type,
+ HeapThing parent_ptr,
+ HeapEntry*,
+ const char*,
+ HeapThing child_ptr,
+ HeapEntry*) {
+ entries_->CountReference(parent_ptr, child_ptr);
+ }
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type,
+ HeapThing parent_ptr,
+ HeapEntry*,
+ HeapThing child_ptr,
+ HeapEntry*) {
+ entries_->CountReference(parent_ptr, child_ptr);
+ }
+
+ private:
+ HeapEntriesMap* entries_;
+};
+
+
class SnapshotFiller : public SnapshotFillerInterface {
public:
explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
@@ -2959,48 +3222,64 @@ class SnapshotFiller : public SnapshotFillerInterface {
collection_(snapshot->collection()),
entries_(entries) { }
HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = allocator->AllocateEntry(ptr);
- entries_->Pair(ptr, entry->index());
- return entry;
+ UNREACHABLE();
+ return NULL;
}
HeapEntry* FindEntry(HeapThing ptr) {
- int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
+ return entries_->Map(ptr);
}
HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
HeapEntry* entry = FindEntry(ptr);
return entry != NULL ? entry : AddEntry(ptr, allocator);
}
void SetIndexedReference(HeapGraphEdge::Type type,
- int parent,
+ HeapThing parent_ptr,
+ HeapEntry* parent_entry,
int index,
+ HeapThing child_ptr,
HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetIndexedReference(type, index, child_entry);
+ int child_index, retainer_index;
+ entries_->CountReference(
+ parent_ptr, child_ptr, &child_index, &retainer_index);
+ parent_entry->SetIndexedReference(
+ type, child_index, index, child_entry, retainer_index);
}
void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
+ HeapThing parent_ptr,
+ HeapEntry* parent_entry,
+ HeapThing child_ptr,
HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetIndexedReference(type, index, child_entry);
+ int child_index, retainer_index;
+ entries_->CountReference(
+ parent_ptr, child_ptr, &child_index, &retainer_index);
+ parent_entry->SetIndexedReference(
+ type, child_index, child_index + 1, child_entry, retainer_index);
}
void SetNamedReference(HeapGraphEdge::Type type,
- int parent,
+ HeapThing parent_ptr,
+ HeapEntry* parent_entry,
const char* reference_name,
+ HeapThing child_ptr,
HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetNamedReference(type, reference_name, child_entry);
+ int child_index, retainer_index;
+ entries_->CountReference(
+ parent_ptr, child_ptr, &child_index, &retainer_index);
+ parent_entry->SetNamedReference(
+ type, child_index, reference_name, child_entry, retainer_index);
}
void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
+ HeapThing parent_ptr,
+ HeapEntry* parent_entry,
+ HeapThing child_ptr,
HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- collection_->names()->GetName(index),
- child_entry);
+ int child_index, retainer_index;
+ entries_->CountReference(
+ parent_ptr, child_ptr, &child_index, &retainer_index);
+ parent_entry->SetNamedReference(type,
+ child_index,
+ collection_->names()->GetName(child_index + 1),
+ child_entry,
+ retainer_index);
}
private:
@@ -3050,17 +3329,35 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
debug_heap->Verify();
#endif
- SetProgressTotal(1); // 1 pass.
+ SetProgressTotal(2); // 2 passes.
#ifdef DEBUG
debug_heap->Verify();
#endif
+ // Pass 1. Iterate heap contents to count entries and references.
+ if (!CountEntriesAndReferences()) return false;
+
+#ifdef DEBUG
+ debug_heap->Verify();
+#endif
+
+ // Allocate memory for entries and references.
+ snapshot_->AllocateEntries(entries_.entries_count(),
+ entries_.total_children_count(),
+ entries_.total_retainers_count());
+
+  // Allocate heap entries for the objects recorded in the entries hash map.
+ entries_.AllocateEntries(V8HeapExplorer::kInternalRootObject);
+
+ // Pass 2. Fill references.
if (!FillReferences()) return false;
- snapshot_->FillChildren();
snapshot_->RememberLastJSObjectId();
+ if (!SetEntriesDominators()) return false;
+ if (!CalculateRetainedSizes()) return false;
+
progress_counter_ = progress_total_;
if (!ProgressReport(true)) return false;
return true;
@@ -3087,18 +3384,213 @@ bool HeapSnapshotGenerator::ProgressReport(bool force) {
void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
if (control_ == NULL) return;
HeapIterator iterator(HeapIterator::kFilterUnreachable);
- progress_total_ = iterations_count * (
+ progress_total_ = (
v8_heap_explorer_.EstimateObjectsCount(&iterator) +
- dom_explorer_.EstimateObjectsCount());
+ dom_explorer_.EstimateObjectsCount()) * iterations_count;
progress_counter_ = 0;
}
+bool HeapSnapshotGenerator::CountEntriesAndReferences() {
+ SnapshotCounter counter(&entries_);
+ v8_heap_explorer_.AddRootEntries(&counter);
+ return v8_heap_explorer_.IterateAndExtractReferences(&counter)
+ && dom_explorer_.IterateAndExtractReferences(&counter);
+}
+
+
bool HeapSnapshotGenerator::FillReferences() {
SnapshotFiller filler(snapshot_, &entries_);
- v8_heap_explorer_.AddRootEntries(&filler);
+  // IterateAndExtractReferences cannot set object names because
+  // it calls JSObject::LocalLookupRealNamedProperty, which in turn
+  // may relocate objects in property maps, thus changing the heap
+  // layout and affecting retainer counts. This is not acceptable
+  // because the number of retainers must not change between the
+  // count and fill passes. To avoid this, a separate post-pass sets
+  // object names.
return v8_heap_explorer_.IterateAndExtractReferences(&filler)
- && dom_explorer_.IterateAndExtractReferences(&filler);
+ && dom_explorer_.IterateAndExtractReferences(&filler)
+ && v8_heap_explorer_.IterateAndSetObjectNames(&filler);
+}
+
+
+bool HeapSnapshotGenerator::IsUserGlobalReference(const HeapGraphEdge& edge) {
+ ASSERT(edge.from() == snapshot_->root());
+ return edge.type() == HeapGraphEdge::kShortcut;
+}
+
+
+void HeapSnapshotGenerator::MarkUserReachableObjects() {
+ List<HeapEntry*> worklist;
+
+ Vector<HeapGraphEdge> children = snapshot_->root()->children();
+ for (int i = 0; i < children.length(); ++i) {
+ if (IsUserGlobalReference(children[i])) {
+ worklist.Add(children[i].to());
+ }
+ }
+
+ while (!worklist.is_empty()) {
+ HeapEntry* entry = worklist.RemoveLast();
+ if (entry->user_reachable()) continue;
+ entry->set_user_reachable();
+ Vector<HeapGraphEdge> children = entry->children();
+ for (int i = 0; i < children.length(); ++i) {
+ HeapEntry* child = children[i].to();
+ if (!child->user_reachable()) {
+ worklist.Add(child);
+ }
+ }
+ }
+}
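+
+
MarkUserReachableObjects is a plain worklist flood-fill seeded with the targets of the root's shortcut edges. The same pattern in miniature, assuming a simple adjacency-list graph:

#include <vector>

// Worklist flood-fill, as in MarkUserReachableObjects: seed with some
// start nodes, then mark along child edges, skipping visited nodes.
void Mark(const std::vector<std::vector<int> >& children,
          std::vector<bool>* reachable,
          std::vector<int> worklist) {
  while (!worklist.empty()) {
    int node = worklist.back();
    worklist.pop_back();
    if ((*reachable)[node]) continue;
    (*reachable)[node] = true;
    for (size_t i = 0; i < children[node].size(); ++i) {
      int child = children[node][i];
      if (!(*reachable)[child]) worklist.push_back(child);
    }
  }
}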
+
+
+static bool IsRetainingEdge(HeapGraphEdge* edge) {
+ if (edge->type() == HeapGraphEdge::kShortcut) return false;
+  // The edge is not retaining if it goes from the system domain
+  // (i.e. an object not reachable from window) to the user domain
+  // (i.e. a reachable object).
+ return edge->from()->user_reachable()
+ || !edge->to()->user_reachable();
+}
+
+
+void HeapSnapshotGenerator::FillPostorderIndexes(
+ Vector<HeapEntry*>* entries) {
+ snapshot_->ClearPaint();
+ int current_entry = 0;
+ List<HeapEntry*> nodes_to_visit;
+ HeapEntry* root = snapshot_->root();
+ nodes_to_visit.Add(root);
+ snapshot_->root()->paint();
+ while (!nodes_to_visit.is_empty()) {
+ HeapEntry* entry = nodes_to_visit.last();
+ Vector<HeapGraphEdge> children = entry->children();
+ bool has_new_edges = false;
+ for (int i = 0; i < children.length(); ++i) {
+ if (entry != root && !IsRetainingEdge(&children[i])) continue;
+ HeapEntry* child = children[i].to();
+ if (!child->painted()) {
+ nodes_to_visit.Add(child);
+ child->paint();
+ has_new_edges = true;
+ }
+ }
+ if (!has_new_edges) {
+ entry->set_ordered_index(current_entry);
+ (*entries)[current_entry++] = entry;
+ nodes_to_visit.RemoveLast();
+ }
+ }
+ ASSERT_EQ(current_entry, entries->length());
+}
+
+
+static int Intersect(int i1, int i2, const Vector<int>& dominators) {
+ int finger1 = i1, finger2 = i2;
+ while (finger1 != finger2) {
+ while (finger1 < finger2) finger1 = dominators[finger1];
+ while (finger2 < finger1) finger2 = dominators[finger2];
+ }
+ return finger1;
+}
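
Intersect relies on the postorder numbering: the root has the highest index, and following dominators[] from any node strictly increases the index, so the two fingers always meet at the nearest common dominator. A small self-contained example (the graph is illustrative):

#include <cassert>

// The same two-finger walk as Intersect above, over a plain array.
static int Intersect(int i1, int i2, const int* dominators) {
  int finger1 = i1, finger2 = i2;
  while (finger1 != finger2) {
    while (finger1 < finger2) finger1 = dominators[finger1];
    while (finger2 < finger1) finger2 = dominators[finger2];
  }
  return finger1;
}

int main() {
  // Postorder-indexed tree: 3 is the root; 0 and 1 hang off 2.
  const int dominators[] = { 2, 2, 3, 3 };
  assert(Intersect(0, 1, dominators) == 2);  // Meet at their parent.
  assert(Intersect(1, 3, dominators) == 3);  // Meet at the root.
  return 0;
}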
+
+
+// The algorithm is based on the article:
+// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
+// Softw. Pract. Exper. 4 (2001), pp. 1-10.
+bool HeapSnapshotGenerator::BuildDominatorTree(
+ const Vector<HeapEntry*>& entries,
+ Vector<int>* dominators) {
+ if (entries.length() == 0) return true;
+ HeapEntry* root = snapshot_->root();
+ const int entries_length = entries.length(), root_index = entries_length - 1;
+ static const int kNoDominator = -1;
+ for (int i = 0; i < root_index; ++i) (*dominators)[i] = kNoDominator;
+ (*dominators)[root_index] = root_index;
+
+  // The affected array is used to mark entries whose dominators
+  // have to be recalculated because of changes in their retainers.
+ ScopedVector<bool> affected(entries_length);
+ for (int i = 0; i < affected.length(); ++i) affected[i] = false;
+ // Mark the root direct children as affected.
+ Vector<HeapGraphEdge> children = entries[root_index]->children();
+ for (int i = 0; i < children.length(); ++i) {
+ affected[children[i].to()->ordered_index()] = true;
+ }
+
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ if (!ProgressReport(true)) return false;
+ for (int i = root_index - 1; i >= 0; --i) {
+ if (!affected[i]) continue;
+ affected[i] = false;
+ // If dominator of the entry has already been set to root,
+ // then it can't propagate any further.
+ if ((*dominators)[i] == root_index) continue;
+ int new_idom_index = kNoDominator;
+ Vector<HeapGraphEdge*> rets = entries[i]->retainers();
+ for (int j = 0; j < rets.length(); ++j) {
+ if (rets[j]->from() != root && !IsRetainingEdge(rets[j])) continue;
+ int ret_index = rets[j]->from()->ordered_index();
+ if (dominators->at(ret_index) != kNoDominator) {
+ new_idom_index = new_idom_index == kNoDominator
+ ? ret_index
+ : Intersect(ret_index, new_idom_index, *dominators);
+ // If idom has already reached the root, it doesn't make sense
+ // to check other retainers.
+ if (new_idom_index == root_index) break;
+ }
+ }
+ if (new_idom_index != kNoDominator
+ && dominators->at(i) != new_idom_index) {
+ (*dominators)[i] = new_idom_index;
+ changed = true;
+ Vector<HeapGraphEdge> children = entries[i]->children();
+ for (int j = 0; j < children.length(); ++j) {
+ affected[children[j].to()->ordered_index()] = true;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+
+bool HeapSnapshotGenerator::SetEntriesDominators() {
+ MarkUserReachableObjects();
+ // This array is used for maintaining postorder of nodes.
+ ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
+ FillPostorderIndexes(&ordered_entries);
+ ScopedVector<int> dominators(ordered_entries.length());
+ if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
+ for (int i = 0; i < ordered_entries.length(); ++i) {
+ ASSERT(dominators[i] >= 0);
+ ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]);
+ }
+ return true;
+}
+
+
+bool HeapSnapshotGenerator::CalculateRetainedSizes() {
+  // For the dominator tree we only know parent nodes, not children,
+  // so to sum up total sizes we "bubble" each node's self size
+  // upwards, adding it to all of its dominators.
+ List<HeapEntry*>& entries = *snapshot_->entries();
+ for (int i = 0; i < entries.length(); ++i) {
+ HeapEntry* entry = entries[i];
+ entry->set_retained_size(entry->self_size());
+ }
+ for (int i = 0; i < entries.length(); ++i) {
+ HeapEntry* entry = entries[i];
+ int entry_size = entry->self_size();
+ for (HeapEntry* dominator = entry->dominator();
+ dominator != entry;
+ entry = dominator, dominator = entry->dominator()) {
+ dominator->add_retained_size(entry_size);
+ }
+ }
+ return true;
}
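
Since only dominator (parent) links are stored, retained sizes are computed by walking each entry's dominator chain to the root and adding the entry's self size at every step. The same walk over a stripped-down entry type:

#include <vector>

struct Entry {
  int self_size;
  int retained_size;
  Entry* dominator;  // The root's dominator is itself.
};

void CalculateRetainedSizes(const std::vector<Entry*>& entries) {
  for (size_t i = 0; i < entries.size(); ++i)
    entries[i]->retained_size = entries[i]->self_size;
  for (size_t i = 0; i < entries.size(); ++i) {
    Entry* entry = entries[i];
    int entry_size = entry->self_size;
    // Add this entry's self size to every dominator up the chain;
    // the walk stops at the root, whose dominator is itself.
    for (Entry* dominator = entry->dominator;
         dominator != entry;
         entry = dominator, dominator = entry->dominator) {
      dominator->retained_size += entry_size;
    }
  }
}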
@@ -3198,23 +3690,19 @@ class OutputStreamWriter {
};
-// type, name|index, to_node.
-const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
-// type, name, id, self_size, children_index.
-const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
-
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
ASSERT(writer_ == NULL);
writer_ = new OutputStreamWriter(stream);
HeapSnapshot* original_snapshot = NULL;
- if (snapshot_->RawSnapshotSize() >=
+ if (snapshot_->raw_entries_size() >=
SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
// The snapshot is too big. Serialize a fake snapshot.
original_snapshot = snapshot_;
snapshot_ = CreateFakeSnapshot();
}
-
+  // Since the node graph is cyclic, we need a first pass to
+  // enumerate the nodes. Strings can be serialized in one pass.
SerializeImpl();
delete writer_;
@@ -3232,23 +3720,42 @@ HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
HeapSnapshot::kFull,
snapshot_->title(),
snapshot_->uid());
- result->AddRootEntry();
+ result->AllocateEntries(2, 1, 0);
+ HeapEntry* root = result->AddRootEntry(1);
const char* text = snapshot_->collection()->names()->GetFormatted(
"The snapshot is too big. "
"Maximum snapshot size is %" V8_PTR_PREFIX "u MB. "
"Actual snapshot size is %" V8_PTR_PREFIX "u MB.",
SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
- (snapshot_->RawSnapshotSize() + MB - 1) / MB);
- HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
- result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
- result->FillChildren();
+ (snapshot_->raw_entries_size() + MB - 1) / MB);
+ HeapEntry* message = result->AddEntry(
+ HeapEntry::kString, text, 0, 4, 0, 0);
+ root->SetUnidirElementReference(0, 1, message);
+ result->SetDominatorsToSelf();
return result;
}
+void HeapSnapshotJSONSerializer::CalculateNodeIndexes(
+ const List<HeapEntry*>& nodes) {
+  // type,name,id,self_size,retained_size,dominator,edges_index.
+ const int node_fields_count = 7;
+  // The root must come first.
+  ASSERT(nodes.first() == snapshot_->root());
+  // Rewrite node indexes so that they refer to actual array positions.
+  // Do this only once.
+ if (nodes[0]->entry_index() == -1) {
+ int index = 0;
+ for (int i = 0; i < nodes.length(); ++i, index += node_fields_count) {
+ nodes[i]->set_entry_index(index);
+ }
+ }
+}
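
With seven fields per node, an entry's index is simply its ordinal times the field count, and edges later reference nodes by that flat offset. A toy illustration of the indexing (the field count of 7 matches the comment above):

#include <cassert>

int main() {
  // Seven serialized fields per node, so node k starts at offset
  // 7 * k in the flat "nodes" array; an edge to node k stores that
  // offset as its to_node value.
  const int node_fields_count = 7;
  int entry_index[4];
  for (int i = 0; i < 4; ++i) entry_index[i] = i * node_fields_count;
  assert(entry_index[0] == 0);   // The root is always first.
  assert(entry_index[3] == 21);  // An edge to node 3 serializes as 21.
  return 0;
}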
+
+
void HeapSnapshotJSONSerializer::SerializeImpl() {
- List<HeapEntry>& nodes = snapshot_->entries();
- ASSERT(0 == snapshot_->root()->index());
+ List<HeapEntry*>& nodes = *(snapshot_->entries());
+ CalculateNodeIndexes(nodes);
writer_->AddCharacter('{');
writer_->AddString("\"snapshot\":{");
SerializeSnapshot();
@@ -3281,9 +3788,16 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
}
-static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
+// This function won't work correctly for INT_MIN, but this is not
+// a problem for heap snapshot serialization.
+static int itoa(int value, const Vector<char>& buffer, int buffer_pos) {
+ if (value < 0) {
+ buffer[buffer_pos++] = '-';
+ value = -value;
+ }
+
int number_of_digits = 0;
- unsigned t = value;
+ int t = value;
do {
++number_of_digits;
} while (t /= 10);
@@ -3313,23 +3827,23 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
if (!first_edge) {
buffer[buffer_pos++] = ',';
}
- buffer_pos = utoa(edge->type(), buffer, buffer_pos);
+ buffer_pos = itoa(edge->type(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
+ buffer_pos = itoa(edge_name_or_index, buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
+ buffer_pos = itoa(edge->to()->entry_index(), buffer, buffer_pos);
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
}
-void HeapSnapshotJSONSerializer::SerializeEdges(const List<HeapEntry>& nodes) {
+void HeapSnapshotJSONSerializer::SerializeEdges(const List<HeapEntry*>& nodes) {
bool first_edge = true;
for (int i = 0; i < nodes.length(); ++i) {
- HeapEntry* entry = &nodes[i];
- Vector<HeapGraphEdge*> children = entry->children();
+ HeapEntry* entry = nodes[i];
+ Vector<HeapGraphEdge> children = entry->children();
for (int j = 0; j < children.length(); ++j) {
- SerializeEdge(children[j], first_edge);
+ SerializeEdge(&children[j], first_edge);
first_edge = false;
if (writer_->aborted()) return;
}
@@ -3339,36 +3853,42 @@ void HeapSnapshotJSONSerializer::SerializeEdges(const List<HeapEntry>& nodes) {
void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry,
int edges_index) {
- // The buffer needs space for 5 uint32_t, 5 commas, \n and \0
+ // The buffer needs space for 6 ints, 1 uint32_t, 7 commas, \n and \0
static const int kBufferSize =
- 5 * MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned // NOLINT
- + 5 + 1 + 1;
+ 6 * MaxDecimalDigitsIn<sizeof(int)>::kSigned // NOLINT
+ + MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned // NOLINT
+ + 7 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
- if (entry_index(entry) != 0) {
+ buffer[buffer_pos++] = '\n';
+ if (entry->entry_index() != 0) {
buffer[buffer_pos++] = ',';
}
- buffer_pos = utoa(entry->type(), buffer, buffer_pos);
+ buffer_pos = itoa(entry->type(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos);
+ buffer_pos = itoa(GetStringId(entry->name()), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->id(), buffer, buffer_pos);
+ buffer_pos = itoa(entry->id(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
+ buffer_pos = itoa(entry->self_size(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(edges_index, buffer, buffer_pos);
- buffer[buffer_pos++] = '\n';
+ buffer_pos = itoa(entry->retained_size(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(entry->dominator()->entry_index(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = itoa(edges_index, buffer, buffer_pos);
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
}
-void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry>& nodes) {
+void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry*>& nodes) {
+ const int edge_fields_count = 3; // type,name|index,to_node.
int edges_index = 0;
for (int i = 0; i < nodes.length(); ++i) {
- HeapEntry* entry = &nodes[i];
+ HeapEntry* entry = nodes[i];
SerializeNode(entry, edges_index);
- edges_index += entry->children().length() * kEdgeFieldsCount;
+ edges_index += entry->children().length() * edge_fields_count;
if (writer_->aborted()) return;
}
}
@@ -3383,15 +3903,17 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
writer_->AddString(",\"meta\":");
// The object describing node serialization layout.
// We use a set of macros to improve readability.
-#define JSON_A(s) "[" s "]"
-#define JSON_O(s) "{" s "}"
-#define JSON_S(s) "\"" s "\""
+#define JSON_A(s) "["s"]"
+#define JSON_O(s) "{"s"}"
+#define JSON_S(s) "\""s"\""
writer_->AddString(JSON_O(
JSON_S("node_fields") ":" JSON_A(
JSON_S("type") ","
JSON_S("name") ","
JSON_S("id") ","
JSON_S("self_size") ","
+ JSON_S("retained_size") ","
+ JSON_S("dominator") ","
JSON_S("edges_index")) ","
JSON_S("node_types") ":" JSON_A(
JSON_A(
@@ -3430,9 +3952,9 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
#undef JSON_O
#undef JSON_A
writer_->AddString(",\"node_count\":");
- writer_->AddNumber(snapshot_->entries().length());
+ writer_->AddNumber(snapshot_->entries()->length());
writer_->AddString(",\"edge_count\":");
- writer_->AddNumber(snapshot_->edges().length());
+ writer_->AddNumber(snapshot_->number_of_edges());
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 349226b43f..e04ddbf3e2 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -446,7 +446,6 @@ class ProfileGenerator {
class HeapEntry;
-class HeapSnapshot;
class HeapGraphEdge BASE_EMBEDDED {
public:
@@ -461,9 +460,9 @@ class HeapGraphEdge BASE_EMBEDDED {
};
HeapGraphEdge() { }
- HeapGraphEdge(Type type, const char* name, int from, int to);
- HeapGraphEdge(Type type, int index, int from, int to);
- void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
+ void Init(int child_index, Type type, const char* name, HeapEntry* to);
+ void Init(int child_index, Type type, int index, HeapEntry* to);
+ void Init(int child_index, int index, HeapEntry* to);
Type type() const { return static_cast<Type>(type_); }
int index() const {
@@ -472,34 +471,48 @@ class HeapGraphEdge BASE_EMBEDDED {
}
const char* name() const {
ASSERT(type_ == kContextVariable
- || type_ == kProperty
- || type_ == kInternal
- || type_ == kShortcut);
+ || type_ == kProperty
+ || type_ == kInternal
+ || type_ == kShortcut);
return name_;
}
+ HeapEntry* to() const { return to_; }
INLINE(HeapEntry* from() const);
- HeapEntry* to() const { return to_entry_; }
private:
- INLINE(HeapSnapshot* snapshot() const);
-
+ int child_index_ : 29;
unsigned type_ : 3;
- int from_index_ : 29;
- union {
- // During entries population |to_index_| is used for storing the index,
- // afterwards it is replaced with a pointer to the entry.
- int to_index_;
- HeapEntry* to_entry_;
- };
union {
int index_;
const char* name_;
};
+ HeapEntry* to_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
};
+class HeapSnapshot;
+
// HeapEntry instances represent an entity from the heap (or a special
-// virtual node, e.g. root).
+// virtual node, e.g. root). To make heap snapshots more compact,
+// HeapEntry has a special memory layout (no Vectors or Lists are used):
+//
+//   +-----------------+
+//        HeapEntry
+//   +-----------------+
+//     HeapGraphEdge   |
+//          ...         } children_count
+//     HeapGraphEdge   |
+//   +-----------------+
+//     HeapGraphEdge*  |
+//          ...         } retainers_count
+//     HeapGraphEdge*  |
+//   +-----------------+
+//
+// In a HeapSnapshot, all entries are hand-allocated in a contiguous array
+// of raw bytes.
+//
class HeapEntry BASE_EMBEDDED {
public:
enum Type {
@@ -514,14 +527,15 @@ class HeapEntry BASE_EMBEDDED {
kNative = v8::HeapGraphNode::kNative,
kSynthetic = v8::HeapGraphNode::kSynthetic
};
- static const int kNoEntry;
HeapEntry() { }
- HeapEntry(HeapSnapshot* snapshot,
+ void Init(HeapSnapshot* snapshot,
Type type,
const char* name,
SnapshotObjectId id,
- int self_size);
+ int self_size,
+ int children_count,
+ int retainers_count);
HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
@@ -529,36 +543,80 @@ class HeapEntry BASE_EMBEDDED {
void set_name(const char* name) { name_ = name; }
inline SnapshotObjectId id() { return id_; }
int self_size() { return self_size_; }
- INLINE(int index() const);
- int children_count() const { return children_count_; }
- INLINE(int set_children_index(int index));
- void add_child(HeapGraphEdge* edge) {
- children_arr()[children_count_++] = edge;
+ int retained_size() { return retained_size_; }
+ void add_retained_size(int size) { retained_size_ += size; }
+ void set_retained_size(int value) { retained_size_ = value; }
+ int ordered_index() { return ordered_index_; }
+ void set_ordered_index(int value) { ordered_index_ = value; }
+ int entry_index() { return entry_index_; }
+ void set_entry_index(int value) { entry_index_ = value; }
+
+ Vector<HeapGraphEdge> children() {
+ return Vector<HeapGraphEdge>(children_arr(), children_count_); }
+ Vector<HeapGraphEdge*> retainers() {
+ return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
+ HeapEntry* dominator() { return dominator_; }
+ void set_dominator(HeapEntry* entry) {
+ ASSERT(entry != NULL);
+ dominator_ = entry;
+ }
+ void clear_paint() { painted_ = false; }
+ bool painted() { return painted_; }
+ void paint() { painted_ = true; }
+ bool user_reachable() { return user_reachable_; }
+ void set_user_reachable() { user_reachable_ = true; }
+
+ void SetIndexedReference(HeapGraphEdge::Type type,
+ int child_index,
+ int index,
+ HeapEntry* entry,
+ int retainer_index);
+ void SetNamedReference(HeapGraphEdge::Type type,
+ int child_index,
+ const char* name,
+ HeapEntry* entry,
+ int retainer_index);
+ void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
+
+ size_t EntrySize() {
+ return EntriesSize(1, children_count_, retainers_count_);
}
- Vector<HeapGraphEdge*> children() {
- return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
-
- void SetIndexedReference(
- HeapGraphEdge::Type type, int index, HeapEntry* entry);
- void SetNamedReference(
- HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
void Print(
const char* prefix, const char* edge_name, int max_depth, int indent);
Handle<HeapObject> GetHeapObject();
+ static size_t EntriesSize(int entries_count,
+ int children_count,
+ int retainers_count);
+
private:
- INLINE(HeapGraphEdge** children_arr());
+ HeapGraphEdge* children_arr() {
+ return reinterpret_cast<HeapGraphEdge*>(this + 1);
+ }
+ HeapGraphEdge** retainers_arr() {
+ return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
+ }
const char* TypeAsString();
+ unsigned painted_: 1;
+ unsigned user_reachable_: 1;
unsigned type_: 4;
- int children_count_: 28;
- int children_index_;
+ int children_count_: 26;
+ int retainers_count_;
int self_size_;
+ union {
+ int ordered_index_; // Used during dominator tree building.
+    int retained_size_;  // At that point, the retained size is not yet set.
+ };
+ int entry_index_;
SnapshotObjectId id_;
+ HeapEntry* dominator_;
HeapSnapshot* snapshot_;
const char* name_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapEntry);
};
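
Given the layout described above, the size of one allocation is just the headers plus the two inline arrays, which is presumably what HeapEntry::EntriesSize computes. A sketch of that arithmetic with stand-in types (FakeEntry and FakeEdge are illustrative, not the real classes):

#include <cstddef>

// Stand-ins whose exact contents are irrelevant; only sizeof matters.
struct FakeEntry { void* fields[6]; };
struct FakeEdge  { void* to; int data; };

// Presumed shape of HeapEntry::EntriesSize: the fixed headers, then
// the child edges stored by value, then retainer back-pointers.
size_t EntriesSize(int entries_count,
                   int children_count,
                   int retainers_count) {
  return sizeof(FakeEntry) * entries_count
       + sizeof(FakeEdge) * children_count
       + sizeof(FakeEdge*) * retainers_count;
}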
@@ -579,55 +637,63 @@ class HeapSnapshot {
Type type,
const char* title,
unsigned uid);
+ ~HeapSnapshot();
void Delete();
HeapSnapshotsCollection* collection() { return collection_; }
Type type() { return type_; }
const char* title() { return title_; }
unsigned uid() { return uid_; }
- size_t RawSnapshotSize() const;
- HeapEntry* root() { return &entries_[root_index_]; }
- HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
- HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
- HeapEntry* gc_subroot(int index) {
- return &entries_[gc_subroot_indexes_[index]];
- }
- List<HeapEntry>& entries() { return entries_; }
- List<HeapGraphEdge>& edges() { return edges_; }
- List<HeapGraphEdge*>& children() { return children_; }
+ HeapEntry* root() { return root_entry_; }
+ HeapEntry* gc_roots() { return gc_roots_entry_; }
+ HeapEntry* natives_root() { return natives_root_entry_; }
+ HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
+ List<HeapEntry*>* entries() { return &entries_; }
+ size_t raw_entries_size() { return raw_entries_size_; }
+ int number_of_edges() { return number_of_edges_; }
void RememberLastJSObjectId();
SnapshotObjectId max_snapshot_js_object_id() const {
return max_snapshot_js_object_id_;
}
+ void AllocateEntries(
+ int entries_count, int children_count, int retainers_count);
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size);
- HeapEntry* AddRootEntry();
- HeapEntry* AddGcRootsEntry();
- HeapEntry* AddGcSubrootEntry(int tag);
- HeapEntry* AddNativesRootEntry();
+ int size,
+ int children_count,
+ int retainers_count);
+ HeapEntry* AddRootEntry(int children_count);
+ HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
+ HeapEntry* AddGcSubrootEntry(int tag,
+ int children_count,
+ int retainers_count);
+ HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
+ void ClearPaint();
HeapEntry* GetEntryById(SnapshotObjectId id);
List<HeapEntry*>* GetSortedEntriesList();
- void FillChildren();
+ void SetDominatorsToSelf();
void Print(int max_depth);
void PrintEntriesSize();
private:
+ HeapEntry* GetNextEntryToInit();
+
HeapSnapshotsCollection* collection_;
Type type_;
const char* title_;
unsigned uid_;
- int root_index_;
- int gc_roots_index_;
- int natives_root_index_;
- int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
- List<HeapEntry> entries_;
- List<HeapGraphEdge> edges_;
- List<HeapGraphEdge*> children_;
+ HeapEntry* root_entry_;
+ HeapEntry* gc_roots_entry_;
+ HeapEntry* natives_root_entry_;
+ HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
+ char* raw_entries_;
+ List<HeapEntry*> entries_;
List<HeapEntry*> sorted_entries_;
+ size_t raw_entries_size_;
+ int number_of_edges_;
SnapshotObjectId max_snapshot_js_object_id_;
friend class HeapSnapshotTester;
@@ -762,7 +828,8 @@ typedef void* HeapThing;
class HeapEntriesAllocator {
public:
virtual ~HeapEntriesAllocator() { }
- virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
+ virtual HeapEntry* AllocateEntry(
+ HeapThing ptr, int children_count, int retainers_count) = 0;
};
@@ -771,11 +838,37 @@ class HeapEntriesAllocator {
class HeapEntriesMap {
public:
HeapEntriesMap();
+ ~HeapEntriesMap();
+
+ void AllocateEntries(HeapThing root_object);
+ HeapEntry* Map(HeapThing thing);
+ void Pair(HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry);
+ void CountReference(HeapThing from, HeapThing to,
+ int* prev_children_count = NULL,
+ int* prev_retainers_count = NULL);
- int Map(HeapThing thing);
- void Pair(HeapThing thing, int entry);
+ int entries_count() { return entries_count_; }
+ int total_children_count() { return total_children_count_; }
+ int total_retainers_count() { return total_retainers_count_; }
+
+ static HeapEntry* const kHeapEntryPlaceholder;
private:
+ struct EntryInfo {
+ EntryInfo(HeapEntry* entry, HeapEntriesAllocator* allocator)
+ : entry(entry),
+ allocator(allocator),
+ children_count(0),
+ retainers_count(0) {
+ }
+ HeapEntry* entry;
+ HeapEntriesAllocator* allocator;
+ int children_count;
+ int retainers_count;
+ };
+
+ static inline void AllocateHeapEntryForMapEntry(HashMap::Entry* map_entry);
+
static uint32_t Hash(HeapThing thing) {
return ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
@@ -786,6 +879,9 @@ class HeapEntriesMap {
}
HashMap entries_;
+ int entries_count_;
+ int total_children_count_;
+ int total_retainers_count_;
friend class HeapObjectsSet;
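The three new counters close over a two-pass build that this revert restores: the first pass only counts, so that the second pass can allocate every HeapEntry with exact children/retainers capacities (hence the extra AllocateEntry parameters earlier in this hunk). A self-contained sketch of the counting half, assuming CountReference is invoked exactly once per discovered edge:

    #include <map>

    typedef void* HeapThing;

    struct EntryInfo { int children_count; int retainers_count; };

    struct EdgeCounter {
      std::map<HeapThing, EntryInfo> infos;  // zero-initialized on first use
      int total_children;
      int total_retainers;
      EdgeCounter() : total_children(0), total_retainers(0) {}

      // Record one edge from -> to. The "previous" counts reported here
      // are exactly the slot indexes the edge will occupy once the real
      // children/retainers arrays are allocated.
      void CountReference(HeapThing from, HeapThing to,
                          int* prev_children = 0, int* prev_retainers = 0) {
        EntryInfo& f = infos[from];
        EntryInfo& t = infos[to];
        if (prev_children != 0) *prev_children = f.children_count;
        if (prev_retainers != 0) *prev_retainers = t.retainers_count;
        ++f.children_count;
        ++t.retainers_count;
        ++total_children;
        ++total_retainers;
      }
    };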
@@ -820,18 +916,26 @@ class SnapshotFillerInterface {
virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
HeapEntriesAllocator* allocator) = 0;
virtual void SetIndexedReference(HeapGraphEdge::Type type,
- int parent_entry,
+ HeapThing parent_ptr,
+ HeapEntry* parent_entry,
int index,
+ HeapThing child_ptr,
HeapEntry* child_entry) = 0;
virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
+ HeapThing parent_ptr,
+ HeapEntry* parent_entry,
+ HeapThing child_ptr,
HeapEntry* child_entry) = 0;
virtual void SetNamedReference(HeapGraphEdge::Type type,
- int parent_entry,
+ HeapThing parent_ptr,
+ HeapEntry* parent_entry,
const char* reference_name,
+ HeapThing child_ptr,
HeapEntry* child_entry) = 0;
virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
+ HeapThing parent_ptr,
+ HeapEntry* parent_entry,
+ HeapThing child_ptr,
HeapEntry* child_entry) = 0;
};
@@ -850,10 +954,12 @@ class V8HeapExplorer : public HeapEntriesAllocator {
V8HeapExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress);
virtual ~V8HeapExplorer();
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ virtual HeapEntry* AllocateEntry(
+ HeapThing ptr, int children_count, int retainers_count);
void AddRootEntries(SnapshotFillerInterface* filler);
int EstimateObjectsCount(HeapIterator* iterator);
bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ bool IterateAndSetObjectNames(SnapshotFillerInterface* filler);
void TagGlobalObjects();
static String* GetConstructorName(JSObject* object);
@@ -861,77 +967,81 @@ class V8HeapExplorer : public HeapEntriesAllocator {
static HeapObject* const kInternalRootObject;
private:
- HeapEntry* AddEntry(HeapObject* object);
+ HeapEntry* AddEntry(
+ HeapObject* object, int children_count, int retainers_count);
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
- const char* name);
+ const char* name,
+ int children_count,
+ int retainers_count);
const char* GetSystemEntryName(HeapObject* object);
void ExtractReferences(HeapObject* obj);
void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
- void ExtractJSObjectReferences(int entry, JSObject* js_obj);
- void ExtractStringReferences(int entry, String* obj);
- void ExtractContextReferences(int entry, Context* context);
- void ExtractMapReferences(int entry, Map* map);
- void ExtractSharedFunctionInfoReferences(int entry,
+ void ExtractJSObjectReferences(HeapEntry* entry, JSObject* js_obj);
+ void ExtractStringReferences(HeapEntry* entry, String* obj);
+ void ExtractContextReferences(HeapEntry* entry, Context* context);
+ void ExtractMapReferences(HeapEntry* entry, Map* map);
+ void ExtractSharedFunctionInfoReferences(HeapEntry* entry,
SharedFunctionInfo* shared);
- void ExtractScriptReferences(int entry, Script* script);
- void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
- void ExtractCodeReferences(int entry, Code* code);
- void ExtractJSGlobalPropertyCellReferences(int entry,
+ void ExtractScriptReferences(HeapEntry* entry, Script* script);
+ void ExtractCodeCacheReferences(HeapEntry* entry, CodeCache* code_cache);
+ void ExtractCodeReferences(HeapEntry* entry, Code* code);
+ void ExtractJSGlobalPropertyCellReferences(HeapEntry* entry,
JSGlobalPropertyCell* cell);
- void ExtractClosureReferences(JSObject* js_obj, int entry);
- void ExtractPropertyReferences(JSObject* js_obj, int entry);
- void ExtractElementReferences(JSObject* js_obj, int entry);
- void ExtractInternalReferences(JSObject* js_obj, int entry);
+ void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
+ void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
bool IsEssentialObject(Object* object);
void SetClosureReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent,
String* reference_name,
Object* child);
void SetNativeBindReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent,
const char* reference_name,
Object* child);
void SetElementReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent,
int index,
Object* child);
void SetInternalReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent,
const char* reference_name,
Object* child,
int field_offset = -1);
void SetInternalReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent,
int index,
Object* child,
int field_offset = -1);
void SetHiddenReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent,
int index,
Object* child);
void SetWeakReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent_entry,
int index,
Object* child_obj,
int field_offset);
void SetPropertyReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent,
String* reference_name,
Object* child,
const char* name_format_string = NULL,
int field_offset = -1);
void SetPropertyShortcutReference(HeapObject* parent_obj,
- int parent,
+ HeapEntry* parent,
String* reference_name,
Object* child);
- void SetUserGlobalReference(Object* user_global);
+ void SetUserGlobalReference(Object* window);
void SetRootGcRootsReference();
void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
void SetGcSubrootReference(
VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
const char* GetStrongGcSubrootName(Object* object);
+ void SetObjectName(HeapObject* object);
void TagObject(Object* obj, const char* tag);
HeapEntry* GetEntry(Object* obj);
@@ -1026,9 +1136,17 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
bool GenerateSnapshot();
private:
+ bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
+ Vector<int>* dominators);
+ bool CalculateRetainedSizes();
+ bool CountEntriesAndReferences();
bool FillReferences();
+ void FillPostorderIndexes(Vector<HeapEntry*>* entries);
+ bool IsUserGlobalReference(const HeapGraphEdge& edge);
+ void MarkUserReachableObjects();
void ProgressStep();
bool ProgressReport(bool force = false);
+ bool SetEntriesDominators();
void SetProgressTotal(int iterations_count);
HeapSnapshot* snapshot_;
@@ -1068,21 +1186,20 @@ class HeapSnapshotJSONSerializer {
v8::internal::kZeroHashSeed);
}
+ void CalculateNodeIndexes(const List<HeapEntry*>& nodes);
HeapSnapshot* CreateFakeSnapshot();
int GetStringId(const char* s);
- int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
- void SerializeEdges(const List<HeapEntry>& nodes);
+ void SerializeEdges(const List<HeapEntry*>& nodes);
void SerializeImpl();
void SerializeNode(HeapEntry* entry, int edges_index);
- void SerializeNodes(const List<HeapEntry>& nodes);
+ void SerializeNodes(const List<HeapEntry*>& nodes);
void SerializeSnapshot();
void SerializeString(const unsigned char* s);
void SerializeStrings();
void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
- static const int kEdgeFieldsCount;
- static const int kNodeFieldsCount;
+ static const int kMaxSerializableSnapshotRawSize;
HeapSnapshot* snapshot_;
HashMap strings_;
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index 363b1ab553..aa67919c54 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -203,9 +203,8 @@ void RegExpMacroAssemblerIrregexp::PushBacktrack(Label* l) {
}
-bool RegExpMacroAssemblerIrregexp::Succeed() {
+void RegExpMacroAssemblerIrregexp::Succeed() {
Emit(BC_SUCCEED, 0);
- return false; // Restart matching for global regexp not supported.
}
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index d64a3d800c..25cb68de0b 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -59,7 +59,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void Backtrack();
virtual void GoTo(Label* label);
virtual void PushBacktrack(Label* label);
- virtual bool Succeed();
+ virtual void Succeed();
virtual void Fail();
virtual void PopRegister(int register_index);
virtual void PushRegister(int register_index,
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index c45fd448df..b7aeac48db 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -102,15 +102,14 @@ void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) {
}
-bool RegExpMacroAssemblerTracer::Succeed() {
- bool restart = assembler_->Succeed();
- PrintF(" Succeed();%s\n", restart ? " [restart for global match]" : "");
- return restart;
+void RegExpMacroAssemblerTracer::Succeed() {
+ PrintF(" Succeed();\n");
+ assembler_->Succeed();
}
void RegExpMacroAssemblerTracer::Fail() {
- PrintF(" Fail();");
+ PrintF(" Fail();\n");
assembler_->Fail();
}
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index a915835855..3fd4d8b39a 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -98,7 +98,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
+ virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 08568de9ef..b6fb3c5214 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,9 +35,7 @@
namespace v8 {
namespace internal {
-RegExpMacroAssembler::RegExpMacroAssembler()
- : slow_safe_compiler_(false),
- global_(false) {
+RegExpMacroAssembler::RegExpMacroAssembler() : slow_safe_compiler_(false) {
}
@@ -151,7 +149,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
input_start,
input_end,
offsets_vector,
- offsets_vector_length,
isolate);
return res;
}
@@ -164,7 +161,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
const byte* input_start,
const byte* input_end,
int* output,
- int output_size,
Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
// Ensure that the minimum stack has been allocated.
@@ -178,10 +174,10 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
input_start,
input_end,
output,
- output_size,
stack_base,
direct_call,
isolate);
+ ASSERT(result <= SUCCESS);
ASSERT(result >= RETRY);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index 5b2cf4aa8f..85874358e5 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -174,8 +174,7 @@ class RegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg) = 0;
virtual void SetCurrentPositionFromEnd(int by) = 0;
virtual void SetRegister(int register_index, int to) = 0;
- // Return whether the matching (with a global regexp) will be restarted.
- virtual bool Succeed() = 0;
+ virtual void Succeed() = 0;
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
virtual void ClearRegisters(int reg_from, int reg_to) = 0;
virtual void WriteStackPointerToRegister(int reg) = 0;
@@ -184,14 +183,8 @@ class RegExpMacroAssembler {
void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; }
bool slow_safe() { return slow_safe_compiler_; }
- // Set whether the regular expression has the global flag. Exiting due to
- // a failure in a global regexp may still mean success overall.
- void set_global(bool global) { global_ = global; }
- bool global() { return global_; }
-
private:
bool slow_safe_compiler_;
- bool global_;
};
@@ -256,7 +249,6 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
const byte* input_start,
const byte* input_end,
int* output,
- int output_size,
Isolate* isolate);
};
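Taken together, the regexp-assembler hunks above strip out the 3.11 global-regexp protocol: Succeed() no longer reports whether matching should restart, the global_ flag is gone, and Execute()/Match() lose their output_size argument. A compilable caricature of the two interface shapes (class names invented; everything else mirrors the hunks):

    // 3.11 shape, removed by this revert.
    class GlobalAwareAssembler {
     public:
      GlobalAwareAssembler() : global_(false) {}
      virtual ~GlobalAwareAssembler() {}
      // Returns whether matching (with a global regexp) will restart.
      virtual bool Succeed() = 0;
      void set_global(bool global) { global_ = global; }
      bool global() { return global_; }
     private:
      bool global_;
    };

    // 3.10 shape, restored: success simply ends the match, and the
    // caller sizes the output vector itself, so no count is threaded
    // through Execute().
    class PlainAssembler {
     public:
      virtual ~PlainAssembler() {}
      virtual void Succeed() = 0;
    };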
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index 38090397aa..a574f62bf6 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -278,10 +278,6 @@ function TrimRegExp(regexp) {
function RegExpToString() {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['RegExp.prototype.toString', this]);
- }
var result = '/' + this.source + '/';
if (this.global) result += 'g';
if (this.ignoreCase) result += 'i';
@@ -427,7 +423,6 @@ function SetUpRegExp() {
LAST_INPUT(lastMatchInfo) = ToString(string);
};
- %OptimizeObjectForAddingMultipleProperties($RegExp, 22);
%DefineOrRedefineAccessorProperty($RegExp, 'input', RegExpGetInput,
RegExpSetInput, DONT_DELETE);
%DefineOrRedefineAccessorProperty($RegExp, '$_', RegExpGetInput,
@@ -482,7 +477,6 @@ function SetUpRegExp() {
RegExpMakeCaptureGetter(i), NoOpSetter,
DONT_DELETE);
}
- %ToFastProperties($RegExp);
}
SetUpRegExp();
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index d18a158c44..6163ca8fa3 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -208,10 +208,8 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
// Pixel elements cannot be created using an object literal.
ASSERT(!copy->HasExternalArrayElements());
switch (copy->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
if (elements->map() == heap->fixed_cow_array_map()) {
isolate->counters()->cow_arrays_created_runtime()->Increment();
@@ -225,7 +223,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
Object* value = elements->get(i);
ASSERT(value->IsSmi() ||
value->IsTheHole() ||
- (IsFastObjectElementsKind(copy->GetElementsKind())));
+ (copy->GetElementsKind() == FAST_ELEMENTS));
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
{ MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
@@ -270,7 +268,6 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
// No contained objects, nothing to do.
break;
}
@@ -455,7 +452,7 @@ MaybeObject* TransitionElements(Handle<Object> object,
}
-static const int kSmiLiteralMinimumLength = 1024;
+static const int kSmiOnlyLiteralMinimumLength = 1024;
Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
@@ -473,22 +470,23 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(elements->get(1)));
- ASSERT(IsFastElementsKind(constant_elements_kind));
Context* global_context = isolate->context()->global_context();
- Object* maybe_maps_array = global_context->js_array_maps();
- ASSERT(!maybe_maps_array->IsUndefined());
- Object* maybe_map = FixedArray::cast(maybe_maps_array)->get(
- constant_elements_kind);
- ASSERT(maybe_map->IsMap());
- object->set_map(Map::cast(maybe_map));
+ if (constant_elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ object->set_map(Map::cast(global_context->smi_js_array_map()));
+ } else if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
+ object->set_map(Map::cast(global_context->double_js_array_map()));
+ } else {
+ object->set_map(Map::cast(global_context->object_js_array_map()));
+ }
Handle<FixedArrayBase> copied_elements_values;
- if (IsFastDoubleElementsKind(constant_elements_kind)) {
+ if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
ASSERT(FLAG_smi_only_arrays);
copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
Handle<FixedDoubleArray>::cast(constant_elements_values));
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind));
+ ASSERT(constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ constant_elements_kind == FAST_ELEMENTS);
const bool is_cow =
(constant_elements_values->map() ==
isolate->heap()->fixed_cow_array_map());
@@ -524,22 +522,15 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
object->set_elements(*copied_elements_values);
object->set_length(Smi::FromInt(copied_elements_values->length()));
- // Ensure that the boilerplate object has FAST_*_ELEMENTS, unless the flag is
+ // Ensure that the boilerplate object has FAST_ELEMENTS, unless the flag is
// on or the object is larger than the threshold.
if (!FLAG_smi_only_arrays &&
- constant_elements_values->length() < kSmiLiteralMinimumLength) {
- ElementsKind elements_kind = object->GetElementsKind();
- if (!IsFastObjectElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- CHECK(!TransitionElements(object, FAST_HOLEY_ELEMENTS,
- isolate)->IsFailure());
- } else {
- CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
- }
+ constant_elements_values->length() < kSmiOnlyLiteralMinimumLength) {
+ if (object->GetElementsKind() != FAST_ELEMENTS) {
+ CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
}
}
- object->ValidateElements();
return object;
}
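The restored boilerplate setup selects the initial JSArray map with a plain three-way branch over the 3.10 elements kinds instead of indexing a per-kind map array. Condensed into a helper for clarity (a sketch of the hunk above, using the internal types it names; it only compiles inside v8::internal):

    static Map* InitialJSArrayMap(Context* global_context,
                                  ElementsKind constant_elements_kind) {
      if (constant_elements_kind == FAST_SMI_ONLY_ELEMENTS) {
        return Map::cast(global_context->smi_js_array_map());
      } else if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
        return Map::cast(global_context->double_js_array_map());
      } else {
        // FAST_ELEMENTS and everything else.
        return Map::cast(global_context->object_js_array_map());
      }
    }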
@@ -1739,7 +1730,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
// length of a string, i.e. it is always a Smi. We check anyway for security.
CONVERT_SMI_ARG_CHECKED(index, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
+ RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
isolate->counters()->regexp_entry_runtime()->Increment();
@@ -3113,7 +3104,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
bool matched = true;
do {
- ASSERT(last_match_info_handle->HasFastObjectElements());
+ ASSERT(last_match_info_handle->HasFastElements());
// Increase the capacity of the builder before entering local handle-scope,
// so its internal buffer can safely allocate a new handle if it grows.
builder.EnsureCapacity(parts_added_per_loop);
@@ -3210,7 +3201,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
if (match.is_null()) return Failure::Exception();
if (match->IsNull()) return *subject_handle;
- ASSERT(last_match_info_handle->HasFastObjectElements());
+ ASSERT(last_match_info_handle->HasFastElements());
int start, end;
{
@@ -3284,7 +3275,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
if (match.is_null()) return Failure::Exception();
if (match->IsNull()) break;
- ASSERT(last_match_info_handle->HasFastObjectElements());
+ ASSERT(last_match_info_handle->HasFastElements());
HandleScope loop_scope(isolate);
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
@@ -3354,7 +3345,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
- ASSERT(last_match_info->HasFastObjectElements());
+ ASSERT(last_match_info->HasFastElements());
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
@@ -3804,73 +3795,62 @@ static bool SearchStringMultiple(Isolate* isolate,
}
-static int SearchRegExpNoCaptureMultiple(
+static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_array,
FixedArrayBuilder* builder) {
ASSERT(subject->IsFlat());
- ASSERT(regexp->CaptureCount() == 0);
int match_start = -1;
int match_end = 0;
int pos = 0;
- int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject);
- if (registers_per_match < 0) return RegExpImpl::RE_EXCEPTION;
-
- int max_matches;
- int num_registers = RegExpImpl::GlobalOffsetsVectorSize(regexp,
- registers_per_match,
- &max_matches);
- OffsetsVector registers(num_registers, isolate);
+ int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
+ if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
+
+ OffsetsVector registers(required_registers, isolate);
Vector<int32_t> register_vector(registers.vector(), registers.length());
int subject_length = subject->length();
bool first = true;
+
for (;;) { // Break on failure, return on exception.
- int num_matches = RegExpImpl::IrregexpExecRaw(regexp,
- subject,
- pos,
- register_vector);
- if (num_matches > 0) {
- for (int match_index = 0; match_index < num_matches; match_index++) {
- int32_t* current_match = &register_vector[match_index * 2];
- match_start = current_match[0];
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- match_start);
- }
- match_end = current_match[1];
- HandleScope loop_scope(isolate);
- if (!first) {
- builder->Add(*isolate->factory()->NewProperSubString(subject,
- match_start,
- match_end));
- } else {
- builder->Add(*isolate->factory()->NewSubString(subject,
- match_start,
- match_end));
- first = false;
- }
+ RegExpImpl::IrregexpResult result =
+ RegExpImpl::IrregexpExecOnce(regexp,
+ subject,
+ pos,
+ register_vector);
+ if (result == RegExpImpl::RE_SUCCESS) {
+ match_start = register_vector[0];
+ builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ if (match_end < match_start) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ match_start);
+ }
+ match_end = register_vector[1];
+ HandleScope loop_scope(isolate);
+ if (!first) {
+ builder->Add(*isolate->factory()->NewProperSubString(subject,
+ match_start,
+ match_end));
+ } else {
+ builder->Add(*isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end));
}
-
- // If we did not get the maximum number of matches, we can stop here
- // since there are no matches left.
- if (num_matches < max_matches) break;
-
if (match_start != match_end) {
pos = match_end;
} else {
pos = match_end + 1;
if (pos > subject_length) break;
}
- } else if (num_matches == 0) {
+ } else if (result == RegExpImpl::RE_FAILURE) {
break;
} else {
- ASSERT_EQ(num_matches, RegExpImpl::RE_EXCEPTION);
- return RegExpImpl::RE_EXCEPTION;
+ ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
+ return result;
}
+ first = false;
}
if (match_start >= 0) {
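The rewritten loop advances one match per call to IrregexpExecOnce instead of draining a batch from IrregexpExecRaw. Its control skeleton, with the string-builder plumbing stripped out (a sketch of the hunk above, under the same RE_SUCCESS/RE_FAILURE/RE_EXCEPTION contract):

    int pos = 0;
    for (;;) {  // Break on failure, return on exception.
      RegExpImpl::IrregexpResult result = RegExpImpl::IrregexpExecOnce(
          regexp, subject, pos, register_vector);
      if (result == RegExpImpl::RE_FAILURE) break;          // no more matches
      if (result != RegExpImpl::RE_SUCCESS) return result;  // RE_EXCEPTION
      int match_start = register_vector[0];
      int match_end = register_vector[1];
      // ... append the slice [match_start, match_end) to the builder ...
      if (match_start != match_end) {
        pos = match_end;
      } else {
        pos = match_end + 1;              // step over an empty match
        if (pos > subject_length) break;  // empty match at the very end
      }
    }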
@@ -3892,7 +3872,7 @@ static int SearchRegExpNoCaptureMultiple(
// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
// separate last match info. See comment on that function.
-static int SearchRegExpMultiple(
+static RegExpImpl::IrregexpResult SearchRegExpMultiple(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
@@ -3900,20 +3880,17 @@ static int SearchRegExpMultiple(
FixedArrayBuilder* builder) {
ASSERT(subject->IsFlat());
- int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject);
- if (registers_per_match < 0) return RegExpImpl::RE_EXCEPTION;
-
- int max_matches;
- int num_registers = RegExpImpl::GlobalOffsetsVectorSize(regexp,
- registers_per_match,
- &max_matches);
- OffsetsVector registers(num_registers, isolate);
+ int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
+ if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
+
+ OffsetsVector registers(required_registers, isolate);
Vector<int32_t> register_vector(registers.vector(), registers.length());
- int num_matches = RegExpImpl::IrregexpExecRaw(regexp,
- subject,
- 0,
- register_vector);
+ RegExpImpl::IrregexpResult result =
+ RegExpImpl::IrregexpExecOnce(regexp,
+ subject,
+ 0,
+ register_vector);
int capture_count = regexp->CaptureCount();
int subject_length = subject->length();
@@ -3922,71 +3899,60 @@ static int SearchRegExpMultiple(
int pos = 0;
// End of previous match. Differs from pos if match was empty.
int match_end = 0;
- bool first = true;
-
- if (num_matches > 0) {
+ if (result == RegExpImpl::RE_SUCCESS) {
+ bool first = true;
do {
- int match_start = 0;
- for (int match_index = 0; match_index < num_matches; match_index++) {
- int32_t* current_match =
- &register_vector[match_index * registers_per_match];
- match_start = current_match[0];
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- match_start);
+ int match_start = register_vector[0];
+ builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ if (match_end < match_start) {
+ ReplacementStringBuilder::AddSubjectSlice(builder,
+ match_end,
+ match_start);
+ }
+ match_end = register_vector[1];
+
+ {
+ // Avoid accumulating new handles inside loop.
+ HandleScope temp_scope(isolate);
+ // Arguments array to replace function is match, captures, index and
+ // subject, i.e., 3 + capture count in total.
+ Handle<FixedArray> elements =
+ isolate->factory()->NewFixedArray(3 + capture_count);
+ Handle<String> match;
+ if (!first) {
+ match = isolate->factory()->NewProperSubString(subject,
+ match_start,
+ match_end);
+ } else {
+ match = isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end);
}
- match_end = current_match[1];
-
- {
- // Avoid accumulating new handles inside loop.
- HandleScope temp_scope(isolate);
- // Arguments array to replace function is match, captures, index and
- // subject, i.e., 3 + capture count in total.
- Handle<FixedArray> elements =
- isolate->factory()->NewFixedArray(3 + capture_count);
- Handle<String> match;
- if (!first) {
- match = isolate->factory()->NewProperSubString(subject,
- match_start,
- match_end);
- } else {
- match = isolate->factory()->NewSubString(subject,
- match_start,
- match_end);
- }
- elements->set(0, *match);
- for (int i = 1; i <= capture_count; i++) {
- int start = current_match[i * 2];
- if (start >= 0) {
- int end = current_match[i * 2 + 1];
- ASSERT(start <= end);
- Handle<String> substring;
- if (!first) {
- substring =
- isolate->factory()->NewProperSubString(subject, start, end);
- } else {
- substring =
- isolate->factory()->NewSubString(subject, start, end);
- }
- elements->set(i, *substring);
+ elements->set(0, *match);
+ for (int i = 1; i <= capture_count; i++) {
+ int start = register_vector[i * 2];
+ if (start >= 0) {
+ int end = register_vector[i * 2 + 1];
+ ASSERT(start <= end);
+ Handle<String> substring;
+ if (!first) {
+ substring = isolate->factory()->NewProperSubString(subject,
+ start,
+ end);
} else {
- ASSERT(current_match[i * 2 + 1] < 0);
- elements->set(i, isolate->heap()->undefined_value());
+ substring = isolate->factory()->NewSubString(subject, start, end);
}
+ elements->set(i, *substring);
+ } else {
+ ASSERT(register_vector[i * 2 + 1] < 0);
+ elements->set(i, isolate->heap()->undefined_value());
}
- elements->set(capture_count + 1, Smi::FromInt(match_start));
- elements->set(capture_count + 2, *subject);
- builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
}
- first = false;
+ elements->set(capture_count + 1, Smi::FromInt(match_start));
+ elements->set(capture_count + 2, *subject);
+ builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
}
- // If we did not get the maximum number of matches, we can stop here
- // since there are no matches left.
- if (num_matches < max_matches) break;
-
if (match_end > match_start) {
pos = match_end;
} else {
@@ -3996,13 +3962,14 @@ static int SearchRegExpMultiple(
}
}
- num_matches = RegExpImpl::IrregexpExecRaw(regexp,
- subject,
- pos,
- register_vector);
- } while (num_matches > 0);
+ result = RegExpImpl::IrregexpExecOnce(regexp,
+ subject,
+ pos,
+ register_vector);
+ first = false;
+ } while (result == RegExpImpl::RE_SUCCESS);
- if (num_matches != RegExpImpl::RE_EXCEPTION) {
+ if (result != RegExpImpl::RE_EXCEPTION) {
// Finished matching, with at least one match.
if (match_end < subject_length) {
ReplacementStringBuilder::AddSubjectSlice(builder,
@@ -4026,7 +3993,7 @@ static int SearchRegExpMultiple(
}
}
// No matches at all, return failure or exception result directly.
- return num_matches;
+ return result;
}
@@ -4043,10 +4010,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
- ASSERT(last_match_info->HasFastObjectElements());
+ ASSERT(last_match_info->HasFastElements());
ASSERT(regexp->GetFlags().is_global());
Handle<FixedArray> result_elements;
- if (result_array->HasFastObjectElements()) {
+ if (result_array->HasFastElements()) {
result_elements =
Handle<FixedArray>(FixedArray::cast(result_array->elements()));
}
@@ -4068,7 +4035,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
- int result;
+ RegExpImpl::IrregexpResult result;
if (regexp->CaptureCount() == 0) {
result = SearchRegExpNoCaptureMultiple(isolate,
subject,
@@ -4348,22 +4315,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// JSObject without a string key. If the key is a Smi, check for a
// definite out-of-bounds access to elements, which is a strong indicator
// that subsequent accesses will also call the runtime. Proactively
- // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
+ // transition elements to FAST_ELEMENTS to avoid excessive boxing of
// doubles for those future calls in the case that the elements would
// become FAST_DOUBLE_ELEMENTS.
Handle<JSObject> js_object(args.at<JSObject>(0));
ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsFastElementsKind(elements_kind) &&
- !IsFastObjectElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_DOUBLE_ELEMENTS) {
FixedArrayBase* elements = js_object->elements();
if (args.at<Smi>(1)->value() >= elements->length()) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
MaybeObject* maybe_object = TransitionElements(js_object,
- elements_kind,
+ FAST_ELEMENTS,
isolate);
if (maybe_object->IsFailure()) return maybe_object;
}
@@ -4533,10 +4495,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
return *value;
}
- js_object->ValidateElements();
Handle<Object> result = JSObject::SetElement(
js_object, index, value, attr, strict_mode, set_mode);
- js_object->ValidateElements();
if (result.is_null()) return Failure::Exception();
return *value;
}
@@ -4694,15 +4654,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
- Handle<JSObject> js_object(Handle<JSObject>::cast(object));
- ElementsKind new_kind = js_object->HasFastHoleyElements()
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
- return TransitionElements(object, new_kind, isolate);
- } else {
- return *object;
- }
+ return TransitionElements(object, FAST_DOUBLE_ELEMENTS, isolate);
}
@@ -4710,15 +4662,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
- Handle<JSObject> js_object(Handle<JSObject>::cast(object));
- ElementsKind new_kind = js_object->HasFastHoleyElements()
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- return TransitionElements(object, new_kind, isolate);
- } else {
- return *object;
- }
+ return TransitionElements(object, FAST_ELEMENTS, isolate);
}
@@ -4749,38 +4693,32 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
HandleScope scope;
Object* raw_boilerplate_object = literals->get(literal_index);
- Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+ Handle<JSArray> boilerplate(JSArray::cast(raw_boilerplate_object));
+#if DEBUG
ElementsKind elements_kind = object->GetElementsKind();
- ASSERT(IsFastElementsKind(elements_kind));
+#endif
+ ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS);
// Smis should never trigger transitions.
ASSERT(!value->IsSmi());
if (value->IsNumber()) {
- ASSERT(IsFastSmiElementsKind(elements_kind));
- ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
- if (IsMoreGeneralElementsKindTransition(
- boilerplate_object->GetElementsKind(),
- transitioned_kind)) {
- JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
- }
- JSObject::TransitionElementsKind(object, transitioned_kind);
- ASSERT(IsFastDoubleElementsKind(object->GetElementsKind()));
+ ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+ JSObject::TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+ if (IsMoreGeneralElementsKindTransition(boilerplate->GetElementsKind(),
+ FAST_DOUBLE_ELEMENTS)) {
+ JSObject::TransitionElementsKind(boilerplate, FAST_DOUBLE_ELEMENTS);
+ }
+ ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements());
HeapNumber* number = HeapNumber::cast(*value);
double_array->set(store_index, number->Number());
} else {
- ASSERT(IsFastSmiElementsKind(elements_kind) ||
- IsFastDoubleElementsKind(elements_kind));
- ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- JSObject::TransitionElementsKind(object, transitioned_kind);
- if (IsMoreGeneralElementsKindTransition(
- boilerplate_object->GetElementsKind(),
- transitioned_kind)) {
- JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
+ ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_DOUBLE_ELEMENTS);
+ JSObject::TransitionElementsKind(object, FAST_ELEMENTS);
+ if (IsMoreGeneralElementsKindTransition(boilerplate->GetElementsKind(),
+ FAST_ELEMENTS)) {
+ JSObject::TransitionElementsKind(boilerplate, FAST_ELEMENTS);
}
FixedArray* object_array = FixedArray::cast(object->elements());
object_array->set(store_index, *value);
@@ -5993,9 +5931,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSArray, array, 0);
- if (!array->HasFastObjectElements()) {
- return isolate->heap()->undefined_value();
- }
+ if (!array->HasFastElements()) return isolate->heap()->undefined_value();
FixedArray* elements = FixedArray::cast(array->elements());
int n = elements->length();
bool ascii = true;
@@ -6438,7 +6374,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
if (maybe_result->IsFailure()) return maybe_result;
result->set_length(Smi::FromInt(part_count));
- ASSERT(result->HasFastObjectElements());
+ ASSERT(result->HasFastElements());
if (part_count == 1 && indices.at(0) == subject_length) {
FixedArray::cast(result->elements())->set(0, *subject);
@@ -6457,7 +6393,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
}
if (limit == 0xffffffffu) {
- if (result->HasFastObjectElements()) {
+ if (result->HasFastElements()) {
StringSplitCache::Enter(isolate->heap(),
isolate->heap()->string_split_cache(),
*subject,
@@ -6814,7 +6750,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (maybe_result->IsFailure()) return maybe_result;
int special_length = special->length();
- if (!array->HasFastObjectElements()) {
+ if (!array->HasFastElements()) {
return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
@@ -6924,7 +6860,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
int array_length = args.smi_at(1);
CONVERT_ARG_CHECKED(String, separator, 2);
- if (!array->HasFastObjectElements()) {
+ if (!array->HasFastElements()) {
return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
@@ -7041,7 +6977,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
- RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
+ RUNTIME_ASSERT(elements_array->HasFastElements() ||
+ elements_array->HasFastSmiOnlyElements());
CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
CONVERT_ARG_CHECKED(String, separator, 2);
// elements_array is fast-mode JSArray of alternating positions
@@ -8340,19 +8277,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Code* unoptimized = function->shared()->code();
- if (unoptimized->kind() == Code::FUNCTION) {
- unoptimized->ClearInlineCaches();
- unoptimized->ClearTypeFeedbackCells(isolate->heap());
- }
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
#if defined(USE_SIMULATOR)
return isolate->heap()->true_value();
@@ -9202,7 +9126,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
MaybeObject* maybe_result_array =
output->EnsureCanContainHeapObjectElements();
if (maybe_result_array->IsFailure()) return maybe_result_array;
- RUNTIME_ASSERT(output->HasFastObjectElements());
+ RUNTIME_ASSERT(output->HasFastElements());
AssertNoAllocation no_allocation;
@@ -9434,7 +9358,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, array, 0);
CONVERT_ARG_CHECKED(JSObject, element, 1);
- RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
+ RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < length; i++) {
@@ -9519,7 +9443,7 @@ class ArrayConcatVisitor {
Handle<Map> map;
if (fast_elements_) {
map = isolate_->factory()->GetElementsTransitionMap(array,
- FAST_HOLEY_ELEMENTS);
+ FAST_ELEMENTS);
} else {
map = isolate_->factory()->GetElementsTransitionMap(array,
DICTIONARY_ELEMENTS);
@@ -9578,10 +9502,8 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
uint32_t length = static_cast<uint32_t>(array->length()->Number());
int element_count = 0;
switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
@@ -9593,7 +9515,6 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
// TODO(1810): Decide if it's worthwhile to implement this.
UNREACHABLE();
break;
@@ -9684,10 +9605,8 @@ static void CollectElementIndices(Handle<JSObject> object,
List<uint32_t>* indices) {
ElementsKind kind = object->GetElementsKind();
switch (kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(object->elements()));
uint32_t length = static_cast<uint32_t>(elements->length());
if (range < length) length = range;
@@ -9698,7 +9617,6 @@ static void CollectElementIndices(Handle<JSObject> object,
}
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// TODO(1810): Decide if it's worthwhile to implement this.
UNREACHABLE();
@@ -9813,10 +9731,8 @@ static bool IterateElements(Isolate* isolate,
ArrayConcatVisitor* visitor) {
uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
switch (receiver->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
@@ -9837,7 +9753,6 @@ static bool IterateElements(Isolate* isolate,
}
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// TODO(1810): Decide if it's worthwhile to implement this.
UNREACHABLE();
@@ -9935,7 +9850,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
int argument_count = static_cast<int>(arguments->length()->Number());
- RUNTIME_ASSERT(arguments->HasFastObjectElements());
+ RUNTIME_ASSERT(arguments->HasFastElements());
Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
// Pass 1: estimate the length and number of elements of the result.
@@ -9955,14 +9870,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
Handle<JSArray> array(Handle<JSArray>::cast(obj));
// TODO(1810): Find out if it's worthwhile to properly support
// arbitrary ElementsKinds. For now, pessimistically transition to
- // FAST_*_ELEMENTS.
+ // FAST_ELEMENTS.
if (array->HasFastDoubleElements()) {
- ElementsKind to_kind = FAST_ELEMENTS;
- if (array->HasFastHoleyElements()) {
- to_kind = FAST_HOLEY_ELEMENTS;
- }
array = Handle<JSArray>::cast(
- JSObject::TransitionElementsKind(array, to_kind));
+ JSObject::TransitionElementsKind(array, FAST_ELEMENTS));
}
length_estimate =
static_cast<uint32_t>(array->length()->Number());
@@ -10059,22 +9970,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, from, 0);
CONVERT_ARG_CHECKED(JSArray, to, 1);
- from->ValidateElements();
- to->ValidateElements();
FixedArrayBase* new_elements = from->elements();
- ElementsKind from_kind = from->GetElementsKind();
MaybeObject* maybe_new_map;
- maybe_new_map = to->GetElementsTransitionMap(isolate, from_kind);
+ ElementsKind elements_kind;
+ if (new_elements->map() == isolate->heap()->fixed_array_map() ||
+ new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
+ elements_kind = FAST_ELEMENTS;
+ } else if (new_elements->map() ==
+ isolate->heap()->fixed_double_array_map()) {
+ elements_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ elements_kind = DICTIONARY_ELEMENTS;
+ }
+ maybe_new_map = to->GetElementsTransitionMap(isolate, elements_kind);
Object* new_map;
if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- to->set_map_and_elements(Map::cast(new_map), new_elements);
+ to->set_map(Map::cast(new_map));
+ to->set_elements(new_elements);
to->set_length(from->length());
Object* obj;
{ MaybeObject* maybe_obj = from->ResetElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
from->set_length(Smi::FromInt(0));
- to->ValidateElements();
return to;
}
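MoveArrayContents now infers the transition target from the map of the moved backing store instead of from the source array's elements kind; the decision reduces to the three maps the hunk checks, with dictionary as the fallback:

    // Sketch of the restored inference (internal V8 types as in the hunk).
    static ElementsKind KindForBackingStore(Heap* heap,
                                            FixedArrayBase* new_elements) {
      Map* map = new_elements->map();
      if (map == heap->fixed_array_map() ||
          map == heap->fixed_cow_array_map()) {
        return FAST_ELEMENTS;
      } else if (map == heap->fixed_double_array_map()) {
        return FAST_DOUBLE_ELEMENTS;
      } else {
        return DICTIONARY_ELEMENTS;
      }
    }

Note that a smi-only source is classified as FAST_ELEMENTS here, since both kinds share fixed_array_map; in the 3.10 kind lattice that is a safe widening, never a narrowing.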
@@ -10124,7 +10042,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
}
return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
- ASSERT(array->HasFastSmiOrObjectElements() ||
+ ASSERT(array->HasFastElements() ||
+ array->HasFastSmiOnlyElements() ||
array->HasFastDoubleElements());
Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
// -1 means start of array.
@@ -13439,11 +13358,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
return isolate->heap()->ToBoolean(obj->Has##Name()); \
}
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
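Each ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION line stamps out a one-predicate runtime entry whose body is the ToBoolean return shown at the top of this hunk. Expanded by hand for the restored smi-only case (a sketch: the macro's argument-checking prologue sits outside this hunk, so the first two lines are an assumption):

    RUNTIME_FUNCTION(MaybeObject*, Runtime_HasFastSmiOnlyElements) {
      ASSERT(args.length() == 1);             // assumed prologue
      CONVERT_ARG_CHECKED(JSObject, obj, 0);  // assumed prologue
      return isolate->heap()->ToBoolean(obj->HasFastSmiOnlyElements());
    }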
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index fc0a47279c..7305256de7 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -89,7 +89,6 @@ namespace internal {
F(NotifyDeoptimized, 1, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
- F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
F(GetOptimizationStatus, 1, 1) \
@@ -364,11 +363,9 @@ namespace internal {
F(IS_VAR, 1, 1) \
\
/* expose boolean functions from objects-inl.h */ \
- F(HasFastSmiElements, 1, 1) \
- F(HasFastSmiOrObjectElements, 1, 1) \
- F(HasFastObjectElements, 1, 1) \
+ F(HasFastSmiOnlyElements, 1, 1) \
+ F(HasFastElements, 1, 1) \
F(HasFastDoubleElements, 1, 1) \
- F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
F(HasExternalPixelElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index a0c8f2cba1..a5d61ebb59 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -2295,6 +2295,8 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
first_unswept_page_ = p;
}
+ heap()->LowerOldGenLimits(freed_bytes);
+
heap()->FreeQueuedChunks();
return IsSweepingComplete();
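The added call feeds bytes reclaimed by lazy sweeping back into the heap's growth accounting, so the old-generation limits tighten between full GCs instead of only at GC time. LowerOldGenLimits' body is not part of this diff; a plausible sketch, in which every member and helper name is an assumption:

    void Heap::LowerOldGenLimits(intptr_t freed_bytes) {
      // Assumed shape: pretend the last full GC left that much less live
      // data, then rederive both limits from the adjusted baseline.
      size_of_old_gen_at_last_old_space_gc_ -= freed_bytes;
      old_gen_promotion_limit_ =
          OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
      old_gen_allocation_limit_ =
          OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
    }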
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index bf711bac71..35f7be5416 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -427,7 +427,7 @@ void StringStream::PrintMentionedObjectCache() {
PrintUsingMap(JSObject::cast(printee));
if (printee->IsJSArray()) {
JSArray* array = JSArray::cast(printee);
- if (array->HasFastObjectElements()) {
+ if (array->HasFastElements()) {
unsigned int limit = FixedArray::cast(array->elements())->length();
unsigned int length =
static_cast<uint32_t>(JSArray::cast(array)->length()->Number());
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 6db9c77edc..94a5264380 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -236,6 +236,8 @@ namespace internal {
SC(math_sin, V8.MathSin) \
SC(math_sqrt, V8.MathSqrt) \
SC(math_tan, V8.MathTan) \
+ SC(array_bounds_checks_seen, V8.ArrayBoundsChecksSeen) \
+ SC(array_bounds_checks_removed, V8.ArrayBoundsChecksRemoved) \
SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \
SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index bb587e1733..c73222a29b 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -199,13 +199,10 @@ Vector<const char> ReadFile(FILE* file,
bool verbose = true);
-// Copy from ASCII/16bit chars to ASCII/16bit chars.
-template <typename sourcechar, typename sinkchar>
-INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
-
+// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
-void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 534622ef58..8215639e5b 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 11
-#define BUILD_NUMBER 7
-#define PATCH_LEVEL 0
+#define MINOR_VERSION 10
+#define BUILD_NUMBER 8
+#define PATCH_LEVEL 13
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
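These four macros are what version.cc stitches into the reported version string, so this hunk is the whole downgrade in miniature: the embedded V8 goes from 3.11.7.0 back to 3.10.8.13. Standalone check of the composition (assuming the macros above are in scope):

    #include <cstdio>

    int main() {
      // IS_CANDIDATE_VERSION is 0, so no candidate suffix is appended.
      std::printf("%d.%d.%d.%d\n",
                  MAJOR_VERSION, MINOR_VERSION, BUILD_NUMBER, PATCH_LEVEL);
      return 0;  // prints 3.10.8.13
    }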
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 9f5f850294..60b29e6475 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -629,8 +629,7 @@ class Assembler : public AssemblerBase {
static const byte kJccShortPrefix = 0x70;
static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
static const byte kJcShortOpcode = kJccShortPrefix | carry;
- static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
- static const byte kJzShortOpcode = kJccShortPrefix | zero;
+
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 0af0a43477..4e037ff465 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -977,7 +977,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -1076,8 +1076,7 @@ static void AllocateJSArray(MacroAssembler* masm,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
+ __ LoadInitialArrayMap(array_function, scratch, elements_array);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ testq(array_size, array_size);
@@ -1304,10 +1303,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(call_generic_code);
__ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// rbx: JSArray
__ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
r11,
kScratchRegister,
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 61d6c87911..d179d2a5d5 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -2864,37 +2864,30 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
+ static const int kRegExpExecuteArguments = 8;
int argument_slots_on_stack =
masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
__ EnterApiExitFrame(argument_slots_on_stack);
- // Argument 9: Pass current isolate address.
+ // Argument 8: Pass current isolate address.
// __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
// Immediate(ExternalReference::isolate_address()));
__ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
kScratchRegister);
- // Argument 8: Indicate that this is a direct call from JavaScript.
+ // Argument 7: Indicate that this is a direct call from JavaScript.
__ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
Immediate(1));
- // Argument 7: Start (high end) of backtracking stack memory area.
+ // Argument 6: Start (high end) of backtracking stack memory area.
__ movq(kScratchRegister, address_of_regexp_stack_memory_address);
__ movq(r9, Operand(kScratchRegister, 0));
__ movq(kScratchRegister, address_of_regexp_stack_memory_size);
__ addq(r9, Operand(kScratchRegister, 0));
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- // Argument 6 is passed in r9 on Linux and on the stack on Windows.
+ // Argument 6 passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
- Immediate(0));
-#else
- __ Set(r9, 0);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
#endif
// Argument 5: static offsets vector buffer.
@@ -2902,7 +2895,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_static_offsets_vector(isolate));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
#endif
// First four arguments are passed in registers on both Linux and Windows.
@@ -2967,9 +2960,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
Label exception;
- __ cmpl(rax, Immediate(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
__ j(equal, &success, Label::kNear);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
__ j(equal, &exception);
@@ -6002,12 +5993,12 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
{ REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
{ REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiToDouble
+ // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
@@ -6281,9 +6272,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ CheckFastElements(rdi, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(rax, &smi_element);
- __ CheckFastSmiElements(rdi, &fast_elements);
+ __ CheckFastSmiOnlyElements(rdi, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -6301,7 +6292,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// place.
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -6315,8 +6306,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
OMIT_SMI_CHECK);
__ ret(0);
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
- // FAST_*_ELEMENTS, and value is Smi.
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ SmiToInteger32(kScratchRegister, rcx);
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
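The argument-count change above (9 back to 8) drops the capture-register-count slot that global regexps used, and the resulting entry signature matches the regexp_matcher typedef further down in simulator-x64.h. A compilable restatement with stand-in types (parameter names are assumptions for illustration):

using byte = unsigned char;  // stand-ins only; V8's real types differ
using Address = void*;
struct String;
struct Isolate;

typedef int (*regexp_matcher)(String* input,            // subject string
                              int start_index,          // character index to start at
                              const byte* input_start,  // address of first character
                              const byte* input_end,    // address past last character
                              int* output,              // offsets vector (argument 5)
                              Address stack_base,       // backtrack stack high end (argument 6)
                              int direct_call,          // 1 if called from JS (argument 7)
                              Isolate* isolate);        // current isolate (argument 8)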
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 2924810c1e..a8d39b25f6 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -220,7 +220,7 @@ ModuloFunction CreateModuloFunction() {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
@@ -241,7 +241,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- rax : value
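The two renamed generators do different amounts of work: the smi-only-to-object transition can keep the FixedArray backing store and only swap the map, while smi-only-to-double must unbox every smi into a raw double in a freshly allocated store. A hedged sketch of the unboxing half (tagging scheme simplified to value << 1 for illustration):

#include <cstdint>
#include <vector>

// Assume a smi is a tagged integer: value << 1, tag bit 0 (simplified).
inline int32_t SmiValue(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 1);
}

// Smi-only -> double allocates a new store and unboxes each element;
// smi-only -> object, by contrast, needs only the map swap.
std::vector<double> UnboxSmiElements(const std::vector<intptr_t>& smis) {
  std::vector<double> unboxed;
  unboxed.reserve(smis.size());
  for (intptr_t s : smis) unboxed.push_back(static_cast<double>(SmiValue(s)));
  return unboxed;
}

This is also why, in the declarations above, GenerateSmiOnlyToObject takes only the MacroAssembler while GenerateSmiOnlyToDouble also takes a failure label: presumably the double path can bail out when it has to allocate.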
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 1b29e58d59..eec83d9d1e 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -91,8 +91,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
#define __ ACCESS_MASM(masm)
@@ -105,12 +103,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue));
- }
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize));
-
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non-object values
// are stored as two smis, causing them to be untouched by GC.
@@ -165,11 +157,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
}
- // Read current padding counter and skip corresponding number of words.
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
-
// Get rid of the internal frame.
}
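The deleted lines implemented the debug-frame padding protocol: push kInitialSize filler words plus a counter, then on exit pop the counter and skip that many words. A minimal sketch of that stack discipline (std::vector standing in for the machine stack):

#include <cassert>
#include <cstdint>
#include <vector>

void PushPadding(std::vector<intptr_t>* stack, int n, intptr_t filler) {
  for (int i = 0; i < n; ++i) stack->push_back(filler);  // padding words
  stack->push_back(n);                                   // counter on top
}

void PopPadding(std::vector<intptr_t>* stack) {
  intptr_t n = stack->back();  // read the counter...
  stack->pop_back();
  stack->resize(stack->size() - n);  // ...and skip that many words
}

int main() {
  std::vector<intptr_t> stack = {1, 2, 3};
  PushPadding(&stack, 4, 0);
  PopPadding(&stack);
  assert(stack.size() == 3);  // frame is back to its pre-padding shape
  return 0;
}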
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 0738153588..7ed81b47e1 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1684,7 +1684,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
default:
UNREACHABLE();
}
- AppendToBuffer("test%c rax,0x%" V8_PTR_PREFIX "x",
+ AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"x",
operand_size_code(),
value);
break;
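Both spellings rely on preprocessor string-literal concatenation around V8_PTR_PREFIX; a self-contained sketch of the same pattern with an assumed prefix value (the real macro is defined elsewhere in V8):

#include <cstdio>

#define V8_PTR_PREFIX "l"  // assumed value, for illustration only

int main() {
  unsigned long value = 0xdeadbeefUL;
  // Adjacent string literals concatenate, yielding "test%c rax,0x%lx\n".
  std::printf("test%c rax,0x%" V8_PTR_PREFIX "x\n", 'q', value);
  return 0;
}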
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 0db7424eb2..974269e5dc 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -659,7 +659,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
ToBooleanStub stub(result_register());
__ push(result_register());
- __ CallStub(&stub, condition->test_id());
+ __ CallStub(&stub);
__ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -1659,8 +1659,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(constant_elements_kind);
+ bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1671,7 +1670,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
@@ -1683,9 +1682,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1713,9 +1713,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
+ if (constant_elements_kind == FAST_ELEMENTS) {
+ // Fast-case array literal with ElementsKind of FAST_ELEMENTS; it cannot
+ // transition and doesn't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -2287,7 +2287,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
CallFunctionStub stub(arg_count, flags);
__ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->id());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
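The array-literal path above picks one of three strategies: the specialized copy-on-write stub, the runtime call for oversized literals, or a shallow-clone stub whose mode depends on the elements kind. A condensed decision sketch (stand-in types, thresholds passed in):

enum class CloneStrategy {
  kCopyOnWriteStub,    // boilerplate is COW-backed FAST_ELEMENTS
  kRuntimeCall,        // literal too long to clone in a stub
  kCloneElementsStub,  // FastCloneShallowArrayStub::CLONE_ELEMENTS
  kCloneAnyStub        // FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS
};

CloneStrategy ChooseCloneStrategy(bool has_constant_fast_elements,
                                  bool cow_backing_store,
                                  int length,
                                  int max_cloned_length) {
  if (has_constant_fast_elements && cow_backing_store)
    return CloneStrategy::kCopyOnWriteStub;
  if (length > max_cloned_length) return CloneStrategy::kRuntimeCall;
  return has_constant_fast_elements ? CloneStrategy::kCloneElementsStub
                                    : CloneStrategy::kCloneAnyStub;
}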
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 82fdb3cece..0632ce439f 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -769,25 +769,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &non_double_value);
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
rbx,
rdi,
&slow);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
rbx,
rdi,
&slow);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1642,7 +1642,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ movq(rax, rdx);
__ Ret();
__ bind(&fail);
@@ -1741,11 +1741,11 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(address());
}
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1766,18 +1766,14 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
address, test_instruction_address, delta);
}
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
- // reverse operation of that.
+ // Patch with a short conditional jump. There must be a
+ // short jump-if-carry/not-carry at this position.
Address jmp_address = test_instruction_address - delta;
- ASSERT((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode);
+ Condition cc = *jmp_address == Assembler::kJncShortOpcode
+ ? not_zero
+ : zero;
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
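PatchInlinedSmiCode works because a short Jcc encodes as the single byte 0x70 | condition, so patching means rewriting that byte with a new condition code; the revert swaps jnc/jc for jnz/jz only in one direction. A self-contained check of the opcode arithmetic (encoding constants per the x86 manual, matching the Assembler constants removed at the top of this diff):

#include <cassert>
#include <cstdint>

const uint8_t kJccShortPrefix = 0x70;
enum Condition { carry = 2, not_carry = 3, zero = 4, not_zero = 5 };

uint8_t PatchShortJcc(uint8_t old_opcode) {
  // jnc (0x73) becomes jnz (0x75); jc (0x72) becomes jz (0x74).
  Condition cc =
      old_opcode == (kJccShortPrefix | not_carry) ? not_zero : zero;
  return static_cast<uint8_t>(kJccShortPrefix | cc);
}

int main() {
  assert(PatchShortJcc(0x73) == 0x75);
  assert(PatchShortJcc(0x72) == 0x74);
  return 0;
}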
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index f1c631bc5f..85e7ac0bf0 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -2223,35 +2223,41 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register result = ToRegister(instr->result());
int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
Handle<String> name = instr->hydrogen()->name();
- Label done;
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- } else {
+
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
Label next;
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
__ j(not_equal, &next, Label::kNear);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ jmp(&done, Label::kNear);
__ bind(&next);
}
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ j(not_equal, &generic, Label::kNear);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ jmp(&done, Label::kNear);
+ __ bind(&generic);
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
}
- if (need_generic) {
- __ Move(rcx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
}
@@ -2324,10 +2330,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
__ and_(temp, Immediate(Map::kElementsKindMask));
__ shr(temp, Immediate(Map::kElementsKindShift));
- __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
+ __ cmpl(temp, Immediate(FAST_ELEMENTS));
+ __ j(equal, &ok, Label::kNear);
__ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ j(less, &fail, Label::kNear);
__ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2371,20 +2375,11 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register result = ToRegister(instr->result());
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits.
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
// Load the result.
__ movq(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
+ BuildFastArrayOperand(instr->elements(), instr->key(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
+ FixedArray::kHeaderSize - kHeapObjectTag));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2398,32 +2393,19 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ offset);
+ __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ movsd(result, double_load_operand);
}
@@ -2432,8 +2414,7 @@ Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
+ uint32_t offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
@@ -2442,14 +2423,11 @@ Operand LCodeGen::BuildFastArrayOperand(
Abort("array index constant value too big");
}
return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
+ constant_value * (1 << shift_size) + offset);
} else {
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset + (additional_index << shift_size));
+ return Operand(elements_pointer_reg, ToRegister(key),
+ scale_factor, offset);
}
}
@@ -2458,17 +2436,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
+ instr->key(), elements_kind, 0));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
@@ -2505,11 +2473,8 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3373,18 +3338,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
+ instr->key(), elements_kind, 0));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
__ cvtsd2ss(value, value);
@@ -3410,11 +3364,8 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3457,29 +3408,30 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Operand operand =
- BuildFastArrayOperand(instr->object(),
- instr->key(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset =
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(FieldOperand(elements, offset), value);
+ } else {
+ __ movq(FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ value);
}
- __ movq(operand, value);
-
if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(!instr->key()->IsConstantOperand());
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ lea(key, operand);
+ __ lea(key, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
__ RecordWrite(elements,
key,
value,
@@ -3508,19 +3460,8 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
+ instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ movsd(double_store_operand, value);
}
@@ -3549,22 +3490,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
__ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
__ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp_reg(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp_reg()), kDontSaveFPRegs);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(rdx));
ASSERT(new_map_reg.is(rbx));
__ movq(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(rdx));
ASSERT(new_map_reg.is(rbx));
@@ -4238,9 +4178,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
// Load the map's "bit field 2".
@@ -4386,11 +4325,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
+ // Deopt if the literal boilerplate ElementsKind is of a type different than
+ // the expected one. The check isn't necessary if the boilerplate has already
+ // been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
__ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
// Load the map's "bit field 2".
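For a constant key, BuildFastArrayOperand reduces to plain byte arithmetic, key * (1 << shift_size) + offset, with the additional_index term gone after this revert. A sketch of that computation:

#include <cassert>
#include <cstdint>

intptr_t FastArrayByteOffset(intptr_t constant_key, int shift_size,
                             intptr_t header_offset) {
  // Element address = elements_pointer + key * element_size + header.
  return constant_key * (static_cast<intptr_t>(1) << shift_size) +
         header_offset;
}

int main() {
  // FAST_ELEMENTS on x64: pointer-size slots, so shift_size == 3.
  assert(FastArrayByteOffset(2, 3, 16) == 32);
  return 0;
}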
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 73e1a9b966..1331fba555 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -231,8 +231,7 @@ class LCodeGen BASE_EMBEDDED {
LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
+ uint32_t offset);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 6094dbb947..3ba0cae7ad 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -2012,9 +2012,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 642a0a0038..9083c1f8db 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -1199,7 +1199,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1216,13 +1215,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@@ -1236,7 +1235,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1694,7 +1692,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1719,7 +1716,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* value() { return inputs_[2]; }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1743,7 +1739,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 95b43f42a4..53becf6c3c 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2658,12 +2658,10 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
+ Immediate(Map::kMaximumBitField2FastElementValue));
j(above, fail, distance);
}
@@ -2671,26 +2669,23 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
+ Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
+ Immediate(Map::kMaximumBitField2FastElementValue));
j(above, fail, distance);
}
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
+ Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
j(above, fail, distance);
}
@@ -2754,18 +2749,24 @@ void MacroAssembler::CompareMap(Register obj,
CompareMapMode mode) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_fast_element_map));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_double_map));
}
}
}
@@ -4056,38 +4057,27 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
- movq(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- int offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- movq(map_in_out, FieldOperand(scratch, offset));
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
+ Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
movq(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
map_out,
scratch,
&done);
@@ -4198,7 +4188,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
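The CheckFast*Elements helpers above lean on the ordering FAST_SMI_ONLY_ELEMENTS == 0 < FAST_ELEMENTS == 1 (the STATIC_ASSERTs), so each predicate becomes one or two threshold compares on the packed bit_field2 value. A sketch of the equivalent predicates over the kind alone (bit-field packing omitted):

#include <cassert>

enum ElementsKind {
  FAST_SMI_ONLY_ELEMENTS = 0,
  FAST_ELEMENTS = 1,
  FAST_DOUBLE_ELEMENTS = 2
};

bool IsFastKind(ElementsKind k) { return k <= FAST_ELEMENTS; }
bool IsFastObjectKind(ElementsKind k) {
  // Range (FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS]: two threshold checks.
  return k > FAST_SMI_ONLY_ELEMENTS && k <= FAST_ELEMENTS;
}
bool IsFastSmiOnlyKind(ElementsKind k) { return k <= FAST_SMI_ONLY_ELEMENTS; }

int main() {
  assert(IsFastKind(FAST_SMI_ONLY_ELEMENTS) && IsFastKind(FAST_ELEMENTS));
  assert(!IsFastKind(FAST_DOUBLE_ELEMENTS));
  assert(IsFastObjectKind(FAST_ELEMENTS));
  assert(!IsFastObjectKind(FAST_SMI_ONLY_ELEMENTS));
  return 0;
}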
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 1c1cd95e94..66587d5319 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -877,9 +877,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
+ void CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by index in
@@ -1141,8 +1141,7 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out,
- bool can_have_holes);
+ Register map_out);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index cb1e029317..bf232bff9b 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,23 +44,21 @@ namespace internal {
/*
* This assembler uses the following register assignment convention
- * - rdx : Currently loaded character(s) as ASCII or UC16. Must be loaded
- * using LoadCurrentCharacter before using any of the dispatch methods.
- * Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - rdi : Current position in input, as negative offset from end of string.
+ * - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - rdi : current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character
- * offset! Is always a 32-bit signed (negative) offset, but must be
+ * offset! Is always a 32-bit signed (negative) offset, but must be
* maintained sign-extended to 64 bits, since it is used as index.
- * - rsi : End of input (points to byte after last character in input),
+ * - rsi : end of input (points to byte after last character in input),
* so that rsi+rdi points to the current character.
- * - rbp : Frame pointer. Used to access arguments, local variables and
+ * - rbp : frame pointer. Used to access arguments, local variables and
* RegExp registers.
- * - rsp : Points to tip of C stack.
- * - rcx : Points to tip of backtrack stack. The backtrack stack contains
- * only 32-bit values. Most are offsets from some base (e.g., character
+ * - rsp : points to tip of C stack.
+ * - rcx : points to tip of backtrack stack. The backtrack stack contains
+ * only 32-bit values. Most are offsets from some base (e.g., character
* positions from end of string or code location from Code* pointer).
- * - r8 : Code object pointer. Used to convert between absolute and
+ * - r8 : code object pointer. Used to convert between absolute and
* code-object-relative addresses.
*
* The registers rax, rbx, r9 and r11 are free to use for computations.
@@ -74,22 +72,20 @@ namespace internal {
*
* The stack will have the following content, in some order, indexable from the
* frame pointer (see, e.g., kStackHighEnd):
- * - Isolate* isolate (address of the current isolate)
+ * - Isolate* isolate (Address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
- * - stack_area_base (high end of the memory area to use as
+ * - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (address of end of string)
- * - start of input (address of first character in string)
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
* - start index (character index of start)
* - String* input_string (input string)
* - return address
* - backup of callee save registers (rbx, possibly rsi and rdi).
- * - success counter (only useful for global regexp to count matches)
* - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
+ * position -1). Used to initialize capture registers to a non-position.
* - At start of string (if 1, we are starting at the start of the
* string, otherwise 0)
* - register 0 rbp[-n] (Only positions must be stored in the first
@@ -98,7 +94,7 @@ namespace internal {
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out uninitialized.
+ * character of the string). The remaining registers start out uninitialized.
*
* The first seven values must be provided by the calling code by
* calling the code's entry address cast to a function pointer with the
@@ -748,16 +744,13 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerX64::Fail() {
- STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
- if (!global()) {
- __ Set(rax, FAILURE);
- }
+ ASSERT(FAILURE == 0); // Return value for failure is zero.
+ __ Set(rax, 0);
__ jmp(&exit_label_);
}
Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
- Label return_rax;
// Finalize code - write the entry point code now we know how many
// registers we need.
// Entry code:
@@ -791,7 +784,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ASSERT_EQ(kInputStart, -3 * kPointerSize);
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
+ ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
__ push(rdi);
__ push(rsi);
__ push(rdx);
@@ -802,8 +795,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ push(rbx); // Callee-save
#endif
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -823,14 +815,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ Set(rax, EXCEPTION);
- __ jmp(&return_rax);
+ __ jmp(&exit_label_);
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
__ testq(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_rax);
+ __ j(not_zero, &exit_label_);
__ bind(&stack_ok);
@@ -855,7 +847,19 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
-#ifdef WIN32
+ if (num_saved_registers_ > 0) {
+ // Fill saved registers with initial value = start offset - 1
+ // Fill in stack push order, to avoid accessing across an unwritten
+ // page (a problem on Windows).
+ __ Set(rcx, kRegisterZero);
+ Label init_loop;
+ __ bind(&init_loop);
+ __ movq(Operand(rbp, rcx, times_1, 0), rax);
+ __ subq(rcx, Immediate(kPointerSize));
+ __ cmpq(rcx,
+ Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
+ __ j(greater, &init_loop);
+ }
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -865,50 +869,22 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ movq(register_location(i), rax); // One write every page.
}
-#endif // WIN32
+ // Initialize backtrack stack pointer.
+ __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
+ // Load previous char as initial value of current-character.
+ Label at_start;
+ __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+ __ j(equal, &at_start);
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
+ __ jmp(&start_label_);
+ __ bind(&at_start);
__ Set(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) {
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- if (num_saved_registers_ > 8) {
- __ Set(rcx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
- __ subq(rcx, Immediate(kPointerSize));
- __ cmpq(rcx,
- Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
- __ j(greater, &init_loop);
- } else { // Unroll the loop.
- for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(register_location(i), rax);
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-
__ jmp(&start_label_);
+
// Exit code:
if (success_label_.is_linked()) {
// Save captures when successful.
@@ -926,10 +902,6 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
for (int i = 0; i < num_saved_registers_; i++) {
__ movq(rax, register_location(i));
- if (i == 0 && global()) {
- // Keep capture start in rdx for the zero-length check later.
- __ movq(rdx, rax);
- }
__ addq(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
__ sar(rax, Immediate(1)); // Convert byte index to character index.
@@ -937,54 +909,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movl(Operand(rbx, i * kIntSize), rax);
}
}
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- // Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subq(rcx, Immediate(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
- __ j(less, &exit_label_);
-
- __ movq(Operand(rbp, kNumOutputRegisters), rcx);
- // Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
- Immediate(num_saved_registers_ * kIntSize));
-
- // Prepare rax to initialize registers with its value in the next run.
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
-
- // Special case for zero-length matches.
- // rdx: capture start index
- __ cmpq(rdi, rdx);
- // Not a zero-length match, restart.
- __ j(not_equal, &load_char_start_regexp);
- // rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
- __ j(zero, &exit_label_, Label::kNear);
- // Advance current position after a zero-length match.
- if (mode_ == UC16) {
- __ addq(rdi, Immediate(2));
- } else {
- __ incq(rdi);
- }
- __ jmp(&load_char_start_regexp);
- } else {
- __ movq(rax, Immediate(SUCCESS));
- }
+ __ Set(rax, SUCCESS);
}
+ // Exit and return rax
__ bind(&exit_label_);
- if (global()) {
- // Return the number of successful captures.
- __ movq(rax, Operand(rbp, kSuccessfulCaptures));
- }
- __ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
__ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
@@ -1021,7 +951,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ testq(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
- __ j(not_zero, &return_rax);
+ __ j(not_zero, &exit_label_);
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -1082,7 +1012,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ Set(rax, EXCEPTION);
- __ jmp(&return_rax);
+ __ jmp(&exit_label_);
}
FixupCodeRelativePositions();
@@ -1205,9 +1135,8 @@ void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
}
-bool RegExpMacroAssemblerX64::Succeed() {
+void RegExpMacroAssemblerX64::Succeed() {
__ jmp(&success_label_);
- return global();
}
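The restored initialization loop fills every capture register with "input start - 1", walking in stack push order so no page is touched out of sequence. A C-level sketch of the same walk (frame layout reduced to a plain array):

#include <cassert>
#include <cstdint>

// register_location(i) lives below register zero, one pointer per slot;
// writing downward matches stack push order.
void InitRegisters(intptr_t* register_zero_slot, int num_saved,
                   intptr_t input_start_minus_one) {
  for (int i = 0; i < num_saved; ++i) {
    register_zero_slot[-i] = input_start_minus_one;
  }
}

int main() {
  intptr_t frame[8] = {0, 0, 0, 0, 0, 0, 0, 0};
  InitRegisters(&frame[7], 8, -1);
  for (int i = 0; i < 8; ++i) assert(frame[i] == -1);
  return 0;
}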
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 31fc8efd93..cd24b60981 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -109,7 +109,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
+ virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@@ -154,12 +154,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value. NumOutputRegisters is passed as 32-bit value. The upper
- // 32 bit of this 64-bit stack slot may contain garbage.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -172,12 +167,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex - kPointerSize;
static const int kInputEnd = kInputStart - kPointerSize;
static const int kRegisterOutput = kInputEnd - kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value.
- static const int kNumOutputRegisters = kRegisterOutput - kPointerSize;
- static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput - kPointerSize;
+ static const int kDirectCall = kFrameAlign;
static const int kIsolate = kDirectCall + kPointerSize;
#endif
@@ -192,14 +183,14 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kNumOutputRegisters - kPointerSize;
+ static const int kBackup_rbx = kStackHighEnd - kPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
- static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kInputStartMinusOne =
+ kLastCalleeSaveRegister - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
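These frame-offset constants form a simple chain of pointer-size steps; with kNumOutputRegisters gone, kStackHighEnd sits directly above kRegisterOutput. A compile-time restatement of the Win64 branch (the kStartIndex base value is an assumption for the sketch):

const int kPointerSize = 8;
const int kStartIndex = 16;  // assumed base value, for illustration only
const int kInputStart = kStartIndex + kPointerSize;
const int kInputEnd = kInputStart + kPointerSize;
const int kRegisterOutput = kInputEnd + kPointerSize;
const int kStackHighEnd = kRegisterOutput + kPointerSize;  // no extra slot
const int kDirectCall = kStackHighEnd + kPointerSize;
const int kIsolate = kDirectCall + kPointerSize;

static_assert(kIsolate - kStartIndex == 6 * kPointerSize,
              "six pointer-size slots above the start-index slot");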
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 8aba70181f..df8423a654 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,12 +40,12 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
+ const byte*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 1b8ed3848c..5721e9b373 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -1434,32 +1434,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiElements(rbx, &call_builtin);
+ __ CheckFastSmiOnlyElements(rbx, &call_builtin);
// rdx: receiver
// rbx: map
-
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ __ movq(r9, rdi); // Backup rdi as it is going to be trashed.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
rbx,
rdi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
- // Restore edi.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- rbx,
- rdi,
&call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ __ movq(rdi, r9);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(rbx, &call_builtin);
@@ -3384,11 +3369,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
break;
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3453,11 +3435,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3608,7 +3587,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(rax, &transition_elements_kind);
}
@@ -3632,13 +3611,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ j(not_equal, &miss_force_generic);
__ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ SmiToInteger32(rcx, rcx);
__ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
} else {
// Do the store and update the write barrier.
- ASSERT(IsFastObjectElementsKind(elements_kind));
+ ASSERT(elements_kind == FAST_ELEMENTS);
__ SmiToInteger32(rcx, rcx);
__ lea(rcx,
FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
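
After this revert, CompileArrayPushCall handles only the single smi-only-to-object transition via GenerateSmiOnlyToObject; the separate holey-map path is gone. The behavior the stub compiles is observable from JavaScript — a minimal sketch, assuming d8 with --allow-natives-syntax --smi-only-arrays and the mjsunit harness used by the tests later in this patch:

var a = [1, 2, 3];                      // FAST_SMI_ONLY_ELEMENTS backing store
if (%HasFastSmiOnlyElements(a)) {
  a.push({});                           // pushing a non-smi triggers the map transition
  assertTrue(%HasFastElements(a));      // now plain FAST_ELEMENTS
  assertFalse(%HasFastDoubleElements(a));
}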
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 3cbc3bc451..af28be19d8 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -27,7 +27,6 @@
prefix cctest
-# All tests prefixed with 'Bug' are expected to fail.
test-api/Bug*: FAIL
##############################################################################
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 762cc9f0fa..8f405b726e 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -400,41 +400,3 @@ TEST(AssignmentAndCall) {
// See MultipleAssignments test.
CheckFunctionName(script, "return 2", "Enclosing.Bar");
}
-
-
-TEST(MethodAssignmentInAnonymousFunctionCall) {
- InitializeVM();
- v8::HandleScope scope;
-
- v8::Handle<v8::Script> script = Compile(
- "(function () {\n"
- " var EventSource = function () { };\n"
- " EventSource.prototype.addListener = function () {\n"
- " return 2012;\n"
- " };\n"
- " this.PublicEventSource = EventSource;\n"
- "})();");
- CheckFunctionName(script, "return 2012", "EventSource.addListener");
-}
-
-
-TEST(ReturnAnonymousFunction) {
- InitializeVM();
- v8::HandleScope scope;
-
- v8::Handle<v8::Script> script = Compile(
- "(function() {\n"
- " function wrapCode() {\n"
- " return function () {\n"
- " return 2012;\n"
- " };\n"
- " };\n"
- " var foo = 10;\n"
- " function f() {\n"
- " return wrapCode();\n"
- " }\n"
- " this.ref = f;\n"
- "})()");
- script->Run();
- CheckFunctionName(script, "return 2012", "");
-}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index c405b334c9..3ac17414a0 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -7,7 +7,6 @@
#include "v8.h"
#include "cctest.h"
-#include "hashmap.h"
#include "heap-profiler.h"
#include "snapshot.h"
#include "debug.h"
@@ -28,30 +27,22 @@ class NamedEntriesDetector {
if (strcmp(entry->name(), "C2") == 0) has_C2 = true;
}
- static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
- }
-
void CheckAllReachables(i::HeapEntry* root) {
- i::HashMap visited(AddressesMatch);
i::List<i::HeapEntry*> list(10);
list.Add(root);
+ root->paint();
CheckEntry(root);
while (!list.is_empty()) {
i::HeapEntry* entry = list.RemoveLast();
- i::Vector<i::HeapGraphEdge*> children = entry->children();
+ i::Vector<i::HeapGraphEdge> children = entry->children();
for (int i = 0; i < children.length(); ++i) {
- if (children[i]->type() == i::HeapGraphEdge::kShortcut) continue;
- i::HeapEntry* child = children[i]->to();
- i::HashMap::Entry* entry = visited.Lookup(
- reinterpret_cast<void*>(child),
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(child)),
- true);
- if (entry->value)
- continue;
- entry->value = reinterpret_cast<void*>(1);
- list.Add(child);
- CheckEntry(child);
+ if (children[i].type() == i::HeapGraphEdge::kShortcut) continue;
+ i::HeapEntry* child = children[i].to();
+ if (!child->painted()) {
+ list.Add(child);
+ child->paint();
+ CheckEntry(child);
+ }
}
}
}
@@ -114,6 +105,9 @@ TEST(HeapSnapshot) {
"var c2 = new C2(a2);");
const v8::HeapSnapshot* snapshot_env2 =
v8::HeapProfiler::TakeSnapshot(v8_str("env2"));
+ i::HeapSnapshot* i_snapshot_env2 =
+ const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
// Verify, that JS global object of env2 has '..2' properties.
@@ -126,7 +120,9 @@ TEST(HeapSnapshot) {
NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_2"));
CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c2"));
+ // Paint all nodes reachable from global object.
NamedEntriesDetector det;
+ i_snapshot_env2->ClearPaint();
det.CheckAllReachables(const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(global_env2)));
CHECK(det.has_A2);
@@ -160,9 +156,9 @@ TEST(HeapSnapshotObjectSizes) {
CHECK_NE(NULL, x2);
// Test sizes.
- CHECK_NE(0, x->GetSelfSize());
- CHECK_NE(0, x1->GetSelfSize());
- CHECK_NE(0, x2->GetSelfSize());
+ CHECK_EQ(x->GetSelfSize() * 3, x->GetRetainedSize());
+ CHECK_EQ(x1->GetSelfSize(), x1->GetRetainedSize());
+ CHECK_EQ(x2->GetSelfSize(), x2->GetRetainedSize());
}
@@ -481,6 +477,66 @@ TEST(HeapSnapshotRootPreservedAfterSorting) {
}
+TEST(HeapEntryDominator) {
+ // The graph looks like this:
+ //
+ // -> node1
+ // a |^
+ // -> node5 ba
+ // a v|
+ // node6 -> node2
+ // b a |^
+ // -> node4 ba
+ // b v|
+ // -> node3
+ //
+ // The dominator for all nodes is node6.
+
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileRun(
+ "function X(a, b) { this.a = a; this.b = b; }\n"
+ "node6 = new X(new X(new X()), new X(new X(),new X()));\n"
+ "(function(){\n"
+ "node6.a.a.b = node6.b.a; // node1 -> node2\n"
+ "node6.b.a.a = node6.a.a; // node2 -> node1\n"
+ "node6.b.a.b = node6.b.b; // node2 -> node3\n"
+ "node6.b.b.a = node6.b.a; // node3 -> node2\n"
+ "})();");
+
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8_str("dominators"));
+
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ CHECK_NE(NULL, global);
+ const v8::HeapGraphNode* node6 =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "node6");
+ CHECK_NE(NULL, node6);
+ const v8::HeapGraphNode* node5 =
+ GetProperty(node6, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, node5);
+ const v8::HeapGraphNode* node4 =
+ GetProperty(node6, v8::HeapGraphEdge::kProperty, "b");
+ CHECK_NE(NULL, node4);
+ const v8::HeapGraphNode* node3 =
+ GetProperty(node4, v8::HeapGraphEdge::kProperty, "b");
+ CHECK_NE(NULL, node3);
+ const v8::HeapGraphNode* node2 =
+ GetProperty(node4, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, node2);
+ const v8::HeapGraphNode* node1 =
+ GetProperty(node5, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, node1);
+
+ CHECK_EQ(node6, node1->GetDominatorNode());
+ CHECK_EQ(node6, node2->GetDominatorNode());
+ CHECK_EQ(node6, node3->GetDominatorNode());
+ CHECK_EQ(node6, node4->GetDominatorNode());
+ CHECK_EQ(node6, node5->GetDominatorNode());
+}
+
+
namespace {
class TestJSONStream : public v8::OutputStream {
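
The hunks above drop the HashMap-based visited set and go back to marking snapshot entries directly with paint()/painted(). The restored traversal, expressed as an illustrative JavaScript analogue over a plain object graph (not a V8 API; the node and edge shapes are assumed for illustration):

function checkAllReachables(root, checkEntry) {
  var list = [root];
  root.painted = true;                  // mark the node itself, not a side table
  checkEntry(root);
  while (list.length > 0) {
    var entry = list.pop();             // mirrors list.RemoveLast()
    for (var i = 0; i < entry.children.length; i++) {
      if (entry.children[i].type === "shortcut") continue;
      var child = entry.children[i].to;
      if (!child.painted) {             // replaces the HashMap lookup
        child.painted = true;
        list.push(child);
        checkEntry(child);
      }
    }
  }
}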
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 33aaed3342..72079dc2ae 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -673,7 +673,7 @@ TEST(JSArray) {
array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
CHECK_EQ(Smi::FromInt(0), array->length());
// Must be in fast mode.
- CHECK(array->HasFastSmiOrObjectElements());
+ CHECK(array->HasFastTypeElements());
// array[length] = name.
array->SetElement(0, *name, NONE, kNonStrictMode)->ToObjectChecked();
@@ -811,9 +811,7 @@ TEST(Iteration) {
// Allocate a JS array to OLD_POINTER_SPACE and NEW_SPACE
objs[next_objs_index++] = FACTORY->NewJSArray(10);
- objs[next_objs_index++] = FACTORY->NewJSArray(10,
- FAST_HOLEY_ELEMENTS,
- TENURED);
+ objs[next_objs_index++] = FACTORY->NewJSArray(10, FAST_ELEMENTS, TENURED);
// Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
objs[next_objs_index++] =
@@ -1597,7 +1595,7 @@ TEST(PrototypeTransitionClearing) {
Handle<JSObject> prototype;
PagedSpace* space = HEAP->old_pointer_space();
do {
- prototype = FACTORY->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
+ prototype = FACTORY->NewJSArray(32 * KB, FAST_ELEMENTS, TENURED);
} while (space->FirstPage() == space->LastPage() ||
!space->LastPage()->Contains(prototype->address()));
@@ -1737,60 +1735,3 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
CHECK(HEAP->InNewSpace(*o));
}
-
-
-static int CountMapTransitions(Map* map) {
- int result = 0;
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsTransitionOnly(i)) {
- result++;
- }
- }
- return result;
-}
-
-
-// Test that map transitions are cleared and maps are collected with
-// incremental marking as well.
-TEST(Regress1465) {
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_trace_incremental_marking = true;
- InitializeVM();
- v8::HandleScope scope;
-
- #define TRANSITION_COUNT 256
- for (int i = 0; i < TRANSITION_COUNT; i++) {
- EmbeddedVector<char, 64> buffer;
- OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i);
- CompileRun(buffer.start());
- }
- CompileRun("var root = new Object;");
- Handle<JSObject> root =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Object>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("root"))));
-
- // Count number of live transitions before marking.
- int transitions_before = CountMapTransitions(root->map());
- CompileRun("%DebugPrint(root);");
- CHECK_EQ(TRANSITION_COUNT, transitions_before);
-
- // Go through all incremental marking steps in one swoop.
- IncrementalMarking* marking = HEAP->incremental_marking();
- CHECK(marking->IsStopped());
- marking->Start();
- CHECK(marking->IsMarking());
- while (!marking->IsComplete()) {
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- }
- CHECK(marking->IsComplete());
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(marking->IsStopped());
-
- // Count number of live transitions after marking. Note that one transition
- // is left, because 'o' still holds an instance of one transition target.
- int transitions_after = CountMapTransitions(root->map());
- CompileRun("%DebugPrint(root);");
- CHECK_EQ(1, transitions_after);
-}
diff --git a/deps/v8/test/cctest/test-list.cc b/deps/v8/test/cctest/test-list.cc
index 4c78f02cee..7520b05fcb 100644
--- a/deps/v8/test/cctest/test-list.cc
+++ b/deps/v8/test/cctest/test-list.cc
@@ -130,18 +130,6 @@ TEST(RemoveLast) {
}
-TEST(Allocate) {
- List<int> list(4);
- list.Add(1);
- CHECK_EQ(1, list.length());
- list.Allocate(100);
- CHECK_EQ(100, list.length());
- CHECK_LE(100, list.capacity());
- list[99] = 123;
- CHECK_EQ(123, list[99]);
-}
-
-
TEST(Clear) {
List<int> list(4);
CHECK_EQ(0, list.length());
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 27123704b1..83a576d367 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -531,18 +531,18 @@ TEST(BootUpMemoryUse) {
// there we just skip the test.
if (initial_memory >= 0) {
InitializeVM();
- intptr_t delta = MemoryInUse() - initial_memory;
+ intptr_t booted_memory = MemoryInUse();
if (sizeof(initial_memory) == 8) {
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(delta, 3600 * 1024); // 3396.
+ CHECK_LE(booted_memory - initial_memory, 3600 * 1024); // 3396.
} else {
- CHECK_LE(delta, 4000 * 1024); // 3948.
+ CHECK_LE(booted_memory - initial_memory, 3600 * 1024); // 3432.
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(delta, 2600 * 1024); // 2484.
+ CHECK_LE(booted_memory - initial_memory, 2600 * 1024); // 2484.
} else {
- CHECK_LE(delta, 2950 * 1024); // 2844
+ CHECK_LE(booted_memory - initial_memory, 2950 * 1024); // 2844
}
}
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 9b4f905e2c..e89e6cddc9 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -506,13 +506,8 @@ static RegExpNode* Compile(const char* input, bool multiline, bool is_ascii) {
NewStringFromUtf8(CStrVector(input));
Handle<String> sample_subject =
isolate->factory()->NewStringFromUtf8(CStrVector(""));
- RegExpEngine::Compile(&compile_data,
- false,
- false,
- multiline,
- pattern,
- sample_subject,
- is_ascii);
+ RegExpEngine::Compile(
+ &compile_data, false, multiline, pattern, sample_subject, is_ascii);
return compile_data.node;
}
@@ -725,7 +720,6 @@ static ArchRegExpMacroAssembler::Result Execute(Code* code,
input_start,
input_end,
captures,
- 0,
Isolate::Current());
}
@@ -1004,11 +998,11 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
int output[4];
NativeRegExpMacroAssembler::Result result =
Execute(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length() * 2,
- output);
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length() * 2,
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index f1387e8a4f..b2eabc44eb 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -53,8 +53,6 @@ class CcTestCase(test.TestCase):
serialization_file = join('obj', 'test', self.mode, 'serdes')
else:
serialization_file = join('obj', 'serdes')
- if not exists(join(self.context.buildspace, 'obj')):
- os.makedirs(join(self.context.buildspace, 'obj'))
serialization_file += '_' + self.GetName()
serialization_file = join(self.context.buildspace, serialization_file)
serialization_file += ''.join(self.variant_flags).replace('-', '_')
diff --git a/deps/v8/test/mjsunit/accessor-map-sharing.js b/deps/v8/test/mjsunit/accessor-map-sharing.js
deleted file mode 100644
index 8bbcb4f5dc..0000000000
--- a/deps/v8/test/mjsunit/accessor-map-sharing.js
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --fast-accessor-properties
-
-// Handy abbreviations.
-var dp = Object.defineProperty;
-var gop = Object.getOwnPropertyDescriptor;
-
-function getter() { return 111; }
-function setter(x) { print(222); }
-function anotherGetter() { return 333; }
-function anotherSetter(x) { print(444); }
-var obj1, obj2;
-
-// Two objects with the same getter.
-obj1 = {};
-dp(obj1, "alpha", { get: getter });
-obj2 = {};
-dp(obj2, "alpha", { get: getter });
-assertTrue(%HaveSameMap(obj1, obj2));
-
-// Two objects with the same getter, oldskool.
-obj1 = {};
-obj1.__defineGetter__("bravo", getter);
-assertEquals(getter, obj1.__lookupGetter__("bravo"));
-obj2 = {};
-obj2.__defineGetter__("bravo", getter);
-assertEquals(getter, obj2.__lookupGetter__("bravo"));
-assertTrue(%HaveSameMap(obj1, obj2));
-
-// Two objects with the same setter.
-obj1 = {};
-dp(obj1, "charlie", { set: setter });
-obj2 = {};
-dp(obj2, "charlie", { set: setter });
-assertTrue(%HaveSameMap(obj1, obj2));
-
-// Two objects with the same setter, oldskool.
-obj1 = {};
-obj1.__defineSetter__("delta", setter);
-assertEquals(setter, obj1.__lookupSetter__("delta"));
-obj2 = {};
-obj2.__defineSetter__("delta", setter);
-assertEquals(setter, obj2.__lookupSetter__("delta"));
-assertTrue(%HaveSameMap(obj1, obj2));
-
-// Two objects with the same getter and setter.
-obj1 = {};
-dp(obj1, "foxtrot", { get: getter, set: setter });
-obj2 = {};
-dp(obj2, "foxtrot", { get: getter, set: setter });
-assertTrue(%HaveSameMap(obj1, obj2));
-
-// Two objects with the same getter and setter, set separately.
-obj1 = {};
-dp(obj1, "golf", { get: getter, configurable: true });
-dp(obj1, "golf", { set: setter, configurable: true });
-obj2 = {};
-dp(obj2, "golf", { get: getter, configurable: true });
-dp(obj2, "golf", { set: setter, configurable: true });
-assertTrue(%HaveSameMap(obj1, obj2));
-
-// Two objects with the same getter and setter, set separately, oldskool.
-obj1 = {};
-obj1.__defineGetter__("hotel", getter);
-obj1.__defineSetter__("hotel", setter);
-obj2 = {};
-obj2.__defineGetter__("hotel", getter);
-obj2.__defineSetter__("hotel", setter);
-assertTrue(%HaveSameMap(obj1, obj2));
-
-// Attribute-only change, shouldn't affect previous descriptor properties.
-obj1 = {};
-dp(obj1, "india", { get: getter, configurable: true, enumerable: true });
-assertEquals(getter, gop(obj1, "india").get);
-assertTrue(gop(obj1, "india").configurable);
-assertTrue(gop(obj1, "india").enumerable);
-dp(obj1, "india", { enumerable: false });
-assertEquals(getter, gop(obj1, "india").get);
-assertTrue(gop(obj1, "india").configurable);
-assertFalse(gop(obj1, "india").enumerable);
-
-// Attribute-only change, shouldn't affect objects with previously shared maps.
-obj1 = {};
-dp(obj1, "juliet", { set: setter, configurable: true, enumerable: false });
-assertEquals(setter, gop(obj1, "juliet").set);
-assertTrue(gop(obj1, "juliet").configurable);
-assertFalse(gop(obj1, "juliet").enumerable);
-obj2 = {};
-dp(obj2, "juliet", { set: setter, configurable: true, enumerable: false });
-assertEquals(setter, gop(obj2, "juliet").set);
-assertTrue(gop(obj2, "juliet").configurable);
-assertFalse(gop(obj2, "juliet").enumerable);
-dp(obj1, "juliet", { set: setter, configurable: false, enumerable: true });
-assertEquals(setter, gop(obj1, "juliet").set);
-assertFalse(gop(obj1, "juliet").configurable);
-assertTrue(gop(obj1, "juliet").enumerable);
-assertEquals(setter, gop(obj2, "juliet").set);
-assertTrue(gop(obj2, "juliet").configurable);
-assertFalse(gop(obj2, "juliet").enumerable);
-
-// Two objects with the different getters.
-obj1 = {};
-dp(obj1, "kilo", { get: getter });
-obj2 = {};
-dp(obj2, "kilo", { get: anotherGetter });
-assertEquals(getter, gop(obj1, "kilo").get);
-assertEquals(anotherGetter, gop(obj2, "kilo").get);
-assertFalse(%HaveSameMap(obj1, obj2));
-
-// Two objects with the same getters and different setters.
-obj1 = {};
-dp(obj1, "lima", { get: getter, set: setter });
-obj2 = {};
-dp(obj2, "lima", { get: getter, set: anotherSetter });
-assertEquals(setter, gop(obj1, "lima").set);
-assertEquals(anotherSetter, gop(obj2, "lima").set);
-assertFalse(%HaveSameMap(obj1, obj2));
-
-// Even 'undefined' is a kind of getter.
-obj1 = {};
-dp(obj1, "mike", { get: undefined });
-assertTrue("mike" in obj1);
-assertEquals(undefined, gop(obj1, "mike").get);
-assertEquals(undefined, obj1.__lookupGetter__("mike"));
-assertEquals(undefined, gop(obj1, "mike").set);
-assertEquals(undefined, obj1.__lookupSetter__("mike"));
-
-// Even 'undefined' is a kind of setter.
-obj1 = {};
-dp(obj1, "november", { set: undefined });
-assertTrue("november" in obj1);
-assertEquals(undefined, gop(obj1, "november").get);
-assertEquals(undefined, obj1.__lookupGetter__("november"));
-assertEquals(undefined, gop(obj1, "november").set);
-assertEquals(undefined, obj1.__lookupSetter__("november"));
-
-// Redefining a data property.
-obj1 = {};
-obj1.oscar = 12345;
-dp(obj1, "oscar", { set: setter });
-assertEquals(setter, gop(obj1, "oscar").set);
-
-// Re-adding the same getter/attributes pair.
-obj1 = {};
-dp(obj1, "papa", { get: getter, configurable: true });
-dp(obj1, "papa", { get: getter, set: setter, configurable: true });
-assertEquals(getter, gop(obj1, "papa").get);
-assertEquals(setter, gop(obj1, "papa").set);
-assertTrue(gop(obj1, "papa").configurable);
-assertFalse(gop(obj1, "papa").enumerable);
diff --git a/deps/v8/test/mjsunit/array-construct-transition.js b/deps/v8/test/mjsunit/array-construct-transition.js
index f8d7c830e5..577e321a55 100644
--- a/deps/v8/test/mjsunit/array-construct-transition.js
+++ b/deps/v8/test/mjsunit/array-construct-transition.js
@@ -27,13 +27,13 @@
// Flags: --allow-natives-syntax --smi-only-arrays
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6));
+support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8));
if (support_smi_only_arrays) {
var a = new Array(0, 1, 2);
- assertTrue(%HasFastSmiElements(a));
+ assertTrue(%HasFastSmiOnlyElements(a));
var b = new Array(0.5, 1.2, 2.3);
assertTrue(%HasFastDoubleElements(b));
var c = new Array(0.5, 1.2, new Object());
- assertTrue(%HasFastObjectElements(c));
+ assertTrue(%HasFastElements(c));
}
diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js
index a96719d448..f657525eb6 100644
--- a/deps/v8/test/mjsunit/array-literal-transitions.js
+++ b/deps/v8/test/mjsunit/array-literal-transitions.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,7 +33,7 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-support_smi_only_arrays = %HasFastSmiElements([1,2,3,4,5,6,7,8,9,10]);
+support_smi_only_arrays = %HasFastSmiOnlyElements([1,2,3,4,5,6,7,8,9,10]);
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
@@ -46,14 +46,14 @@ function get(foo) { return foo; } // Used to generate dynamic values.
function array_literal_test() {
var a0 = [1, 2, 3];
- assertTrue(%HasFastSmiElements(a0));
+ assertTrue(%HasFastSmiOnlyElements(a0));
var a1 = [get(1), get(2), get(3)];
- assertTrue(%HasFastSmiElements(a1));
+ assertTrue(%HasFastSmiOnlyElements(a1));
var b0 = [1, 2, get("three")];
- assertTrue(%HasFastObjectElements(b0));
+ assertTrue(%HasFastElements(b0));
var b1 = [get(1), get(2), get("three")];
- assertTrue(%HasFastObjectElements(b1));
+ assertTrue(%HasFastElements(b1));
var c0 = [1, 2, get(3.5)];
assertTrue(%HasFastDoubleElements(c0));
@@ -75,7 +75,7 @@ function array_literal_test() {
var object = new Object();
var d0 = [1, 2, object];
- assertTrue(%HasFastObjectElements(d0));
+ assertTrue(%HasFastElements(d0));
assertEquals(object, d0[2]);
assertEquals(2, d0[1]);
assertEquals(1, d0[0]);
@@ -87,7 +87,7 @@ function array_literal_test() {
assertEquals(1, e0[0]);
var f0 = [1, 2, [1, 2]];
- assertTrue(%HasFastObjectElements(f0));
+ assertTrue(%HasFastElements(f0));
assertEquals([1,2], f0[2]);
assertEquals(2, f0[1]);
assertEquals(1, f0[0]);
@@ -115,9 +115,9 @@ if (support_smi_only_arrays) {
large =
[ 0, 1, 2, 3, 4, 5, d(), d(), d(), d(), d(), d(), o(), o(), o(), o() ];
assertFalse(%HasDictionaryElements(large));
- assertFalse(%HasFastSmiElements(large));
+ assertFalse(%HasFastSmiOnlyElements(large));
assertFalse(%HasFastDoubleElements(large));
- assertTrue(%HasFastObjectElements(large));
+ assertTrue(%HasFastElements(large));
assertEquals(large,
[0, 1, 2, 3, 4, 5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5,
new Object(), new Object(), new Object(), new Object()]);
diff --git a/deps/v8/test/mjsunit/big-array-literal.js b/deps/v8/test/mjsunit/big-array-literal.js
index 8e0ff87277..a0fad7c2ee 100644
--- a/deps/v8/test/mjsunit/big-array-literal.js
+++ b/deps/v8/test/mjsunit/big-array-literal.js
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// On MacOS, this test needs a stack size of at least 538 kBytes.
-// Flags: --stack-size=600
-
// Test that we can make large object literals that work.
// Also test that we can attempt to make even larger object literals without
// crashing.
diff --git a/deps/v8/test/mjsunit/compiler/inline-construct.js b/deps/v8/test/mjsunit/compiler/inline-construct.js
index 7a3f1e44bd..af9e69c940 100644
--- a/deps/v8/test/mjsunit/compiler/inline-construct.js
+++ b/deps/v8/test/mjsunit/compiler/inline-construct.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --inline-construct
+// Flags: --allow-natives-syntax --expose-gc --inline-construct
// Test inlining of constructor calls.
@@ -68,9 +68,7 @@ function TestInAllContexts(constructor) {
%DeoptimizeFunction(value_context);
%DeoptimizeFunction(test_context);
%DeoptimizeFunction(effect_context);
- %ClearFunctionTypeFeedback(value_context);
- %ClearFunctionTypeFeedback(test_context);
- %ClearFunctionTypeFeedback(effect_context);
+ gc(); // Makes V8 forget about type information for *_context.
}
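
The gc() call works because a full garbage collection also clears the type feedback collected for the *_context closures, which is what the removed %ClearFunctionTypeFeedback calls did explicitly. The pattern in isolation — a minimal sketch, assuming d8 with --allow-natives-syntax --expose-gc:

function f(x) { return x + 1; }
f(1); f(2);                        // collect smi type feedback
%OptimizeFunctionOnNextCall(f);
f(3);                              // compiles using that feedback
%DeoptimizeFunction(f);            // throw away the optimized code
gc();                              // a full GC also drops the recorded feedback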
diff --git a/deps/v8/test/mjsunit/debug-liveedit-stack-padding.js b/deps/v8/test/mjsunit/debug-liveedit-stack-padding.js
deleted file mode 100644
index 36de356973..0000000000
--- a/deps/v8/test/mjsunit/debug-liveedit-stack-padding.js
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-
-Debug = debug.Debug;
-
-SlimFunction = eval(
- "(function() {\n " +
- " return 'Cat';\n" +
- "})\n"
-);
-
-var script = Debug.findScript(SlimFunction);
-
-Debug.setScriptBreakPointById(script.id, 1, 0);
-
-var orig_animal = "'Cat'";
-var patch_pos = script.source.indexOf(orig_animal);
-var new_animal_patch = "'Capybara'";
-
-debugger_handler = (function() {
- var already_called = false;
- return function() {
- if (already_called) {
- return;
- }
- already_called = true;
-
- var change_log = new Array();
- try {
- Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos,
- orig_animal.length, new_animal_patch, change_log);
- } finally {
- print("Change log: " + JSON.stringify(change_log) + "\n");
- }
- };
-})();
-
-var saved_exception = null;
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.Break) {
- try {
- debugger_handler();
- } catch (e) {
- saved_exception = e;
- }
- } else {
- print("Other: " + event);
- }
-}
-
-Debug.setListener(listener);
-
-var animal = SlimFunction();
-
-if (saved_exception) {
- print("Exception: " + saved_exception);
- assertUnreachable();
-}
-
-assertEquals("Capybara", animal);
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index 26b3c7807b..4aa79de659 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -34,7 +34,7 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
+support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8));
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
@@ -59,8 +59,8 @@ var elements_kind = {
}
function getKind(obj) {
- if (%HasFastSmiElements(obj)) return elements_kind.fast_smi_only;
- if (%HasFastObjectElements(obj)) return elements_kind.fast;
+ if (%HasFastSmiOnlyElements(obj)) return elements_kind.fast_smi_only;
+ if (%HasFastElements(obj)) return elements_kind.fast;
if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
// Every external kind is also an external array.
@@ -116,7 +116,7 @@ if (support_smi_only_arrays) {
assertKind(elements_kind.fast_smi_only, too);
}
-// Make sure the element kind transitions from smi when a non-smi is stored.
+// Make sure the element kind transitions from smi-only when a non-smi is stored.
var you = new Array();
assertKind(elements_kind.fast_smi_only, you);
for (var i = 0; i < 1337; i++) {
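
getKind() above classifies an array by probing each runtime predicate in turn; the kinds form a one-way lattice, so a store of a more general value can only move an array toward fast object elements, never back. A minimal sketch of the transitions, assuming the same d8 flags and mjsunit harness as the test:

var a = [1, 2, 3];
assertTrue(%HasFastSmiOnlyElements(a)); // starts smi-only
a[0] = 1.5;                             // heap-number store
assertTrue(%HasFastDoubleElements(a));  // smi-only -> double
a[1] = "x";                             // non-number store
assertTrue(%HasFastElements(a));        // double -> fast object elements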
diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js
index 50ca2a16e2..5e78f10a0b 100644
--- a/deps/v8/test/mjsunit/elements-transition-hoisting.js
+++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,7 @@
// not hoisted) correctly, don't change the semantics programs and don't trigger
// deopt through hoisting in important situations.
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6));
+support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6));
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
diff --git a/deps/v8/test/mjsunit/elements-transition.js b/deps/v8/test/mjsunit/elements-transition.js
index 0dffd3723e..60e051b3fa 100644
--- a/deps/v8/test/mjsunit/elements-transition.js
+++ b/deps/v8/test/mjsunit/elements-transition.js
@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax --smi-only-arrays
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
+support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8));
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
@@ -44,8 +44,8 @@ if (support_smi_only_arrays) {
var array_1 = new Array(length);
var array_2 = new Array(length);
- assertTrue(%HasFastSmiElements(array_1));
- assertTrue(%HasFastSmiElements(array_2));
+ assertTrue(%HasFastSmiOnlyElements(array_1));
+ assertTrue(%HasFastSmiOnlyElements(array_2));
for (var i = 0; i < length; i++) {
if (i == length - 5 && test_double) {
// Trigger conversion to fast double elements at length-5.
@@ -57,8 +57,8 @@ if (support_smi_only_arrays) {
// Trigger conversion to fast object elements at length-3.
set(array_1, i, 'object');
set(array_2, i, 'object');
- assertTrue(%HasFastObjectElements(array_1));
- assertTrue(%HasFastObjectElements(array_2));
+ assertTrue(%HasFastElements(array_1));
+ assertTrue(%HasFastElements(array_2));
} else if (i != length - 7) {
// Set the element to an integer but leave a hole at length-7.
set(array_1, i, 2*i+1);
diff --git a/deps/v8/test/mjsunit/error-constructors.js b/deps/v8/test/mjsunit/error-constructors.js
index 107164df56..966a1629d7 100644
--- a/deps/v8/test/mjsunit/error-constructors.js
+++ b/deps/v8/test/mjsunit/error-constructors.js
@@ -25,7 +25,39 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+var e = new Error();
+assertFalse(e.hasOwnProperty('message'));
+Error.prototype.toString = Object.prototype.toString;
+assertEquals("[object Error]", Error.prototype.toString());
+assertEquals(Object.prototype, Error.prototype.__proto__);
+
+// Check that error construction does not call setters for the
+// properties on error objects in prototypes.
+function fail() { assertTrue(false); };
+ReferenceError.prototype.__defineSetter__('stack', fail);
+ReferenceError.prototype.__defineSetter__('message', fail);
+ReferenceError.prototype.__defineSetter__('type', fail);
+ReferenceError.prototype.__defineSetter__('arguments', fail);
+var e0 = new ReferenceError();
+var e1 = new ReferenceError('123');
+assertTrue(e1.hasOwnProperty('message'));
+assertTrue(e0.hasOwnProperty('stack'));
+assertTrue(e1.hasOwnProperty('stack'));
+assertTrue(e0.hasOwnProperty('type'));
+assertTrue(e1.hasOwnProperty('type'));
+assertTrue(e0.hasOwnProperty('arguments'));
+assertTrue(e1.hasOwnProperty('arguments'));
+
+// Check that the name property on error prototypes is read-only and
+// dont-delete. This is not specified, but allowing the name property
+// to be overwritten with a getter can leak error objects from different
+// script tags in the same context in a browser setting. We therefore
+// disallow changes to the name property on error objects.
+assertEquals("ReferenceError", ReferenceError.prototype.name);
+delete ReferenceError.prototype.name;
+assertEquals("ReferenceError", ReferenceError.prototype.name);
+ReferenceError.prototype.name = "not a reference error";
+assertEquals("ReferenceError", ReferenceError.prototype.name);
// Check that message and name are not enumerable on Error objects.
var desc = Object.getOwnPropertyDescriptor(Error.prototype, 'name');
@@ -43,75 +75,8 @@ assertFalse(desc['enumerable']);
desc = Object.getOwnPropertyDescriptor(e, 'stack');
assertFalse(desc['enumerable']);
-var e = new Error();
-assertFalse(e.hasOwnProperty('message'));
-
// name is not tested above, but in addition we should have no enumerable
// properties, so we simply assert that.
for (var v in e) {
assertUnreachable();
}
-
-// Check that error construction does not call setters for the
-// properties on error objects in prototypes.
-function fail() { assertUnreachable(); };
-ReferenceError.prototype.__defineSetter__('name', fail);
-ReferenceError.prototype.__defineSetter__('message', fail);
-ReferenceError.prototype.__defineSetter__('type', fail);
-ReferenceError.prototype.__defineSetter__('arguments', fail);
-ReferenceError.prototype.__defineSetter__('stack', fail);
-
-var e = new ReferenceError();
-assertTrue(e.hasOwnProperty('stack'));
-assertTrue(e.hasOwnProperty('type'));
-assertTrue(e.hasOwnProperty('arguments'));
-
-var e = new ReferenceError('123');
-assertTrue(e.hasOwnProperty('message'));
-assertTrue(e.hasOwnProperty('stack'));
-assertTrue(e.hasOwnProperty('type'));
-assertTrue(e.hasOwnProperty('arguments'));
-
-var e = %MakeReferenceError("my_test_error", [0, 1]);
-assertTrue(e.hasOwnProperty('stack'));
-assertTrue(e.hasOwnProperty('type'));
-assertTrue(e.hasOwnProperty('arguments'));
-assertEquals("my_test_error", e.type)
-
-// Check that intercepting property access from toString is prevented for
-// compiler errors. This is not specified, but allowing interception
-// through a getter can leak error objects from different
-// script tags in the same context in a browser setting.
-var errors = [SyntaxError, ReferenceError, TypeError];
-for (var i in errors) {
- var name = errors[i].prototype.toString();
- // Monkey-patch prototype.
- var props = ["name", "message", "type", "arguments", "stack"];
- for (var j in props) {
- errors[i].prototype.__defineGetter__(props[j], fail);
- }
- // String conversion should not invoke monkey-patched getters on prototype.
- var e = new errors[i];
- assertEquals(name, e.toString());
- // Custom getters in actual objects are welcome.
- e.__defineGetter__("name", function() { return "mine"; });
- assertEquals("mine", e.toString());
-}
-
-// Monkey-patching non-static errors should still be observable.
-function MyError() {}
-MyError.prototype = new Error;
-var errors = [Error, RangeError, EvalError, URIError, MyError];
-for (var i in errors) {
- errors[i].prototype.__defineGetter__("name", function() { return "my"; });
- errors[i].prototype.__defineGetter__("message", function() { return "moo"; });
- var e = new errors[i];
- assertEquals("my: moo", e.toString());
-}
-
-
-Error.prototype.toString = Object.prototype.toString;
-assertEquals("[object Error]", Error.prototype.toString());
-assertEquals(Object.prototype, Error.prototype.__proto__);
-var e = new Error("foo");
-assertEquals("[object Error]", e.toString());
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index ab5f2e3e76..a1b927097a 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -64,7 +64,6 @@ regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
# Stack manipulations in LiveEdit are buggy - see bug 915
debug-liveedit-check-stack: SKIP
debug-liveedit-patch-positions-replace: SKIP
-debug-liveedit-stack-padding: SKIP
# Test Crankshaft compilation time. Expected to take too long in debug mode.
regress/regress-1969: PASS, SKIP if $mode == debug
diff --git a/deps/v8/test/mjsunit/packed-elements.js b/deps/v8/test/mjsunit/packed-elements.js
deleted file mode 100644
index 7f333e56e5..0000000000
--- a/deps/v8/test/mjsunit/packed-elements.js
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --smi-only-arrays --packed-arrays
-
-var has_packed_elements = !%HasFastHoleyElements(Array());
-
-function test1() {
- var a = Array(8);
- assertTrue(%HasFastSmiOrObjectElements(a));
- assertTrue(%HasFastHoleyElements(a));
-}
-
-function test2() {
- var a = Array();
- assertTrue(%HasFastSmiOrObjectElements(a));
- assertFalse(%HasFastHoleyElements(a));
-}
-
-function test3() {
- var a = Array(1,2,3,4,5,6,7);
- assertTrue(%HasFastSmiOrObjectElements(a));
- assertFalse(%HasFastHoleyElements(a));
-}
-
-function test4() {
- var a = [1, 2, 3, 4];
- assertTrue(%HasFastSmiElements(a));
- assertFalse(%HasFastHoleyElements(a));
- var b = [1, 2,, 4];
- assertTrue(%HasFastSmiElements(b));
- assertTrue(%HasFastHoleyElements(b));
-}
-
-function test5() {
- var a = [1, 2, 3, 4.5];
- assertTrue(%HasFastDoubleElements(a));
- assertFalse(%HasFastHoleyElements(a));
- var b = [1,, 3.5, 4];
- assertTrue(%HasFastDoubleElements(b));
- assertTrue(%HasFastHoleyElements(b));
- var c = [1, 3.5,, 4];
- assertTrue(%HasFastDoubleElements(c));
- assertTrue(%HasFastHoleyElements(c));
-}
-
-function test6() {
- var x = new Object();
- var a = [1, 2, 3.5, x];
- assertTrue(%HasFastObjectElements(a));
- assertFalse(%HasFastHoleyElements(a));
- assertEquals(1, a[0]);
- assertEquals(2, a[1]);
- assertEquals(3.5, a[2]);
- assertEquals(x, a[3]);
- var b = [1,, 3.5, x];
- assertTrue(%HasFastObjectElements(b));
- assertTrue(%HasFastHoleyElements(b));
- assertEquals(1, b[0]);
- assertEquals(undefined, b[1]);
- assertEquals(3.5, b[2]);
- assertEquals(x, b[3]);
- var c = [1, 3.5, x,,];
- assertTrue(%HasFastObjectElements(c));
- assertTrue(%HasFastHoleyElements(c));
- assertEquals(1, c[0]);
- assertEquals(3.5, c[1]);
- assertEquals(x, c[2]);
- assertEquals(undefined, c[3]);
-}
-
-function test_with_optimization(f) {
- // Run tests in a loop to make sure that inlined Array() constructor runs out
- // of new space memory and must fall back on runtime impl.
- for (i = 0; i < 250000; ++i) f();
- %OptimizeFunctionOnNextCall(f);
- for (i = 0; i < 250000; ++i) f(); // Make sure GC happens
-}
-
-if (has_packed_elements) {
- test_with_optimization(test1);
- test_with_optimization(test2);
- test_with_optimization(test3);
- test_with_optimization(test4);
- test_with_optimization(test5);
- test_with_optimization(test6);
-}
-
diff --git a/deps/v8/test/mjsunit/regexp-capture-3.js b/deps/v8/test/mjsunit/regexp-capture-3.js
index 9bdd600599..b676f01c2c 100644
--- a/deps/v8/test/mjsunit/regexp-capture-3.js
+++ b/deps/v8/test/mjsunit/regexp-capture-3.js
@@ -162,7 +162,6 @@ assertEquals("*foo * baz", a);
// string we can test that the relevant node is removed by verifying that
// there is no hang.
function NoHang(re) {
- print(re);
"This is an ASCII string that could take forever".match(re);
}
@@ -216,3 +215,5 @@ regex10.exec(input0);
var regex11 = /^(?:[^\u0000-\u0080]|[0-9a-z?,.!&\s#()])+$/i;
regex11.exec(input0);
+var regex12 = /u(\xf0{8}?\D*?|( ? !)$h??(|)*?(||)+?\6((?:\W\B|--\d-*-|)?$){0, }?|^Y( ? !1)\d+)+a/;
+regex12.exec("");
diff --git a/deps/v8/test/mjsunit/regexp-capture.js b/deps/v8/test/mjsunit/regexp-capture.js
index 8aae71795a..307309482a 100755
--- a/deps/v8/test/mjsunit/regexp-capture.js
+++ b/deps/v8/test/mjsunit/regexp-capture.js
@@ -56,3 +56,5 @@ assertEquals(["bbc", "b"], /^(b+|a){1,2}?bc/.exec("bbc"));
assertEquals(["bbaa", "a", "", "a"],
/((\3|b)\2(a)){2,}/.exec("bbaababbabaaaaabbaaaabba"));
+// From crbug.com/128821 - don't hang:
+"".match(/((a|i|A|I|u|o|U|O)(s|c|b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|y|z|B|C|D|F|G|H|J|K|L|M|N|P|Q|R|S|T|V|W|X|Y|Z)*) de\/da([.,!?\s]|$)/);
diff --git a/deps/v8/test/mjsunit/regexp-global.js b/deps/v8/test/mjsunit/regexp-global.js
deleted file mode 100644
index 12f8578596..0000000000
--- a/deps/v8/test/mjsunit/regexp-global.js
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Test that an optional capture is cleared between two matches.
-var str = "ABX X";
-str = str.replace(/(\w)?X/g, function(match, capture) {
- assertTrue(match.indexOf(capture) >= 0 ||
- capture === undefined);
- return capture ? capture.toLowerCase() : "-";
- });
-assertEquals("Ab -", str);
-
-// Test zero-length matches.
-str = "Als Gregor Samsa eines Morgens";
-str = str.replace(/\b/g, function(match, capture) {
- return "/";
- });
-assertEquals("/Als/ /Gregor/ /Samsa/ /eines/ /Morgens/", str);
-
-// Test zero-length matches that have non-zero-length sub-captures.
-str = "It was a pleasure to burn.";
-str = str.replace(/(?=(\w+))\b/g, function(match, capture) {
- return capture.length;
- });
-assertEquals("2It 3was 1a 8pleasure 2to 4burn.", str);
-
-// Test multiple captures.
-str = "Try not. Do, or do not. There is no try.";
-str = str.replace(/(not?)|(do)|(try)/gi,
- function(match, c1, c2, c3) {
- assertTrue((c1 === undefined && c2 === undefined) ||
- (c2 === undefined && c3 === undefined) ||
- (c1 === undefined && c3 === undefined));
- if (c1) return "-";
- if (c2) return "+";
- if (c3) return "="
- });
-assertEquals("= -. +, or + -. There is - =.", str);
-
-// Test multiple alternate captures.
-str = "FOUR LEGS GOOD, TWO LEGS BAD!";
-str = str.replace(/(FOUR|TWO) LEGS (GOOD|BAD)/g,
- function(match, num_legs, likeability) {
- assertTrue(num_legs !== undefined);
- assertTrue(likeability !== undefined);
- if (num_legs == "FOUR") assertTrue(likeability == "GOOD");
- if (num_legs == "TWO") assertTrue(likeability == "BAD");
- return match.length - 10;
- });
-assertEquals("4, 2!", str);
-
-
-// The same tests with UC16.
-
-//Test that an optional capture is cleared between two matches.
-str = "AB\u1234 \u1234";
-str = str.replace(/(\w)?\u1234/g,
- function(match, capture) {
- assertTrue(match.indexOf(capture) >= 0 ||
- capture === undefined);
- return capture ? capture.toLowerCase() : "-";
- });
-assertEquals("Ab -", str);
-
-// Test zero-length matches.
-str = "Als \u2623\u2642 eines Morgens";
-str = str.replace(/\b/g, function(match, capture) {
- return "/";
- });
-assertEquals("/Als/ \u2623\u2642 /eines/ /Morgens/", str);
-
-// Test zero-length matches that have non-zero-length sub-captures.
-str = "It was a pleasure to \u70e7.";
-str = str.replace(/(?=(\w+))\b/g, function(match, capture) {
- return capture.length;
- });
-assertEquals("2It 3was 1a 8pleasure 2to \u70e7.", str);
-
-// Test multiple captures.
-str = "Try not. D\u26aa, or d\u26aa not. There is no try.";
-str = str.replace(/(not?)|(d\u26aa)|(try)/gi,
- function(match, c1, c2, c3) {
- assertTrue((c1 === undefined && c2 === undefined) ||
- (c2 === undefined && c3 === undefined) ||
- (c1 === undefined && c3 === undefined));
- if (c1) return "-";
- if (c2) return "+";
- if (c3) return "="
- });
-assertEquals("= -. +, or + -. There is - =.", str);
-
-// Test multiple alternate captures.
-str = "FOUR \u817f GOOD, TWO \u817f BAD!";
-str = str.replace(/(FOUR|TWO) \u817f (GOOD|BAD)/g,
- function(match, num_legs, likeability) {
- assertTrue(num_legs !== undefined);
- assertTrue(likeability !== undefined);
- if (num_legs == "FOUR") assertTrue(likeability == "GOOD");
- if (num_legs == "TWO") assertTrue(likeability == "BAD");
- return match.length - 7;
- });
-assertEquals("4, 2!", str);
-
-// Test capture that is a real substring.
-var str = "Beasts of England, beasts of Ireland";
-str = str.replace(/(.*)/g, function(match) { return '~'; });
-assertEquals("~~", str);
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index c2d92823bc..ec82c96e09 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -705,14 +705,3 @@ assertThrows("RegExp('(?!*)')");
// Test trimmed regular expression for RegExp.test().
assertTrue(/.*abc/.test("abc"));
assertFalse(/.*\d+/.test("q"));
-
-// Test that RegExp.prototype.toString() throws TypeError for
-// incompatible receivers (ES5 section 15.10.6 and 15.10.6.4).
-assertThrows("RegExp.prototype.toString.call(null)", TypeError);
-assertThrows("RegExp.prototype.toString.call(0)", TypeError);
-assertThrows("RegExp.prototype.toString.call('')", TypeError);
-assertThrows("RegExp.prototype.toString.call(false)", TypeError);
-assertThrows("RegExp.prototype.toString.call(true)", TypeError);
-assertThrows("RegExp.prototype.toString.call([])", TypeError);
-assertThrows("RegExp.prototype.toString.call({})", TypeError);
-assertThrows("RegExp.prototype.toString.call(function(){})", TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-117409.js b/deps/v8/test/mjsunit/regress/regress-117409.js
index 98aab5ac2d..9222191ae6 100644
--- a/deps/v8/test/mjsunit/regress/regress-117409.js
+++ b/deps/v8/test/mjsunit/regress/regress-117409.js
@@ -36,7 +36,7 @@ var literal = [1.2];
KeyedStoreIC(literal);
KeyedStoreIC(literal);
-// Truncate array to 0 elements, at which point backing store will be replaced
+// Truncate array to 0 elements, at which point backing store will be replaced
// with empty fixed array.
literal.length = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-128146.js b/deps/v8/test/mjsunit/regress/regress-128146.js
deleted file mode 100644
index 730dd91065..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-128146.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Define accessor properties, resulting in an AccessorPair with 2 transitions.
-Object.defineProperty({},"foo",{set:function(){},configurable:false});
-Object.defineProperty({},"foo",{get:function(){},configurable:false});
-
-// Define a data property under the same name.
-Object.defineProperty({},"foo",{});
diff --git a/deps/v8/test/mjsunit/regress/regress-1639-2.js b/deps/v8/test/mjsunit/regress/regress-1639-2.js
index 01f0dc2048..c439dd8fff 100644
--- a/deps/v8/test/mjsunit/regress/regress-1639-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-1639-2.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,6 @@
// Flags: --expose-debug-as debug
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
-var exception = false;
function sendCommand(state, cmd) {
// Get the debug command processor in paused state.
@@ -80,7 +79,6 @@ function listener(event, exec_state, event_data, data) {
}
} catch (e) {
print(e);
- exception = true;
}
}
@@ -93,4 +91,3 @@ function a() {
// Set a break point and call to invoke the debug event listener.
Debug.setBreakPoint(a, 0, 0);
a();
-assertFalse(exception);
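
This test and the regress-1639.js test below share the same shape: a listener reacts to Break events and resumes execution through the sendCommand helper defined at the top of each file. The skeleton, reduced to its moving parts (assumes d8 with --expose-debug-as debug; sendCommand is the test-local helper):

Debug = debug.Debug;

function listener(event, exec_state, event_data, data) {
  if (event == Debug.DebugEvent.Break) {
    // Inspect the paused location, then resume stepping to the next line.
    sendCommand(exec_state, {
      seq: 0,
      type: "request",
      command: "continue",
      arguments: { stepaction: "next" }
    });
  }
}

Debug.setListener(listener);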
diff --git a/deps/v8/test/mjsunit/regress/regress-1639.js b/deps/v8/test/mjsunit/regress/regress-1639.js
index 47cdbc43c1..ed68c97df8 100644
--- a/deps/v8/test/mjsunit/regress/regress-1639.js
+++ b/deps/v8/test/mjsunit/regress/regress-1639.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,6 @@
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var breaks = 0;
-var exception = false;
function sendCommand(state, cmd) {
// Get the debug command processor in paused state.
@@ -48,18 +47,15 @@ function listener(event, exec_state, event_data, data) {
"should not break on unexpected lines")
assertEquals('BREAK ' + breaks, line.substr(-7));
breaks++;
- if (breaks < 4) {
- sendCommand(exec_state, {
- seq: 0,
- type: "request",
- command: "continue",
- arguments: { stepaction: "next" }
- });
- }
+ sendCommand(exec_state, {
+ seq: 0,
+ type: "request",
+ command: "continue",
+ arguments: { stepaction: "next" }
+ });
}
} catch (e) {
print(e);
- exception = true;
}
}
@@ -86,6 +82,4 @@ function c() {
// Set a break point and call to invoke the debug event listener.
Debug.setBreakPoint(b, 0, 0);
a(b);
-a(); // BREAK 3
-
-assertFalse(exception);
+// BREAK 3
diff --git a/deps/v8/test/mjsunit/regress/regress-1849.js b/deps/v8/test/mjsunit/regress/regress-1849.js
index 5b8fc50f31..176f918b93 100644
--- a/deps/v8/test/mjsunit/regress/regress-1849.js
+++ b/deps/v8/test/mjsunit/regress/regress-1849.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// See: http://code.google.com/p/v8/issues/detail?id=1849
+// See: http://code.google.com/p/v8/issues/detail?id=1878
// Flags: --allow-natives-syntax
@@ -36,4 +36,4 @@ for (var i = 0; i < count; i++) {
arr[i] = 0;
}
assertFalse(%HasFastDoubleElements(arr));
-assertTrue(%HasFastSmiElements(arr));
+assertTrue(%HasFastSmiOnlyElements(arr));
diff --git a/deps/v8/test/mjsunit/regress/regress-1878.js b/deps/v8/test/mjsunit/regress/regress-1878.js
index fbc47bdd14..a1648b1217 100644
--- a/deps/v8/test/mjsunit/regress/regress-1878.js
+++ b/deps/v8/test/mjsunit/regress/regress-1878.js
@@ -34,11 +34,11 @@ var a = Array();
for (var i = 0; i < 1000; i++) {
var ai = natives.InternalArray(10000);
assertFalse(%HaveSameMap(ai, a));
- assertTrue(%HasFastObjectElements(ai));
+ assertTrue(%HasFastElements(ai));
}
for (var i = 0; i < 1000; i++) {
var ai = new natives.InternalArray(10000);
assertFalse(%HaveSameMap(ai, a));
- assertTrue(%HasFastObjectElements(ai));
+ assertTrue(%HasFastElements(ai));
}
diff --git a/deps/v8/test/mjsunit/regress/regress-2153.js b/deps/v8/test/mjsunit/regress/regress-2153.js
deleted file mode 100644
index 3170042bed..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-2153.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var o = {};
-o.__defineGetter__('foo', function () { return null; });
-var o = {};
-o.foo = 42;
-assertEquals(42, o.foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-122271.js b/deps/v8/test/mjsunit/regress/regress-crbug-122271.js
index 8ae91e857a..3a99a7fa58 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-122271.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-122271.js
@@ -39,11 +39,11 @@ function foo(array) {
array.foo = "bar";
}
-assertTrue(%HasFastSmiElements(a));
-assertTrue(%HasFastObjectElements(b));
+assertTrue(%HasFastSmiOnlyElements(a));
+assertTrue(%HasFastElements(b));
foo(a);
foo(b);
-assertTrue(%HasFastSmiElements(a));
-assertTrue(%HasFastObjectElements(b));
+assertTrue(%HasFastSmiOnlyElements(a));
+assertTrue(%HasFastElements(b));
diff --git a/deps/v8/test/mjsunit/regress/regress-smi-only-concat.js b/deps/v8/test/mjsunit/regress/regress-smi-only-concat.js
index 55ca2996ff..a9a6d89b06 100644
--- a/deps/v8/test/mjsunit/regress/regress-smi-only-concat.js
+++ b/deps/v8/test/mjsunit/regress/regress-smi-only-concat.js
@@ -33,5 +33,5 @@
var fast_array = ['a', 'b'];
var array = fast_array.concat(fast_array);
-assertTrue(%HasFastObjectElements(fast_array));
-assertTrue(%HasFastObjectElements(array));
+assertTrue(%HasFastElements(fast_array));
+assertTrue(%HasFastElements(array));
\ No newline at end of file
diff --git a/deps/v8/test/mjsunit/regress/regress-transcendental.js b/deps/v8/test/mjsunit/regress/regress-transcendental.js
deleted file mode 100644
index b5dbcb48af..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-transcendental.js
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-gc
-
-// Test whether the runtime implementation and generated code of
-// sine and tangens return the same results.
-
-function test(f, x, name) {
- // Reset transcendental cache.
- gc();
- // Initializing cache leads to a runtime call.
- var runtime_result = f(x);
- // Flush transcendental cache entries and optimize f.
- for (var i = 0; i < 100000; i++) f(i);
- // Calculate using generated code.
- var gencode_result = f(x);
- print(name + " runtime function: " + runtime_result);
- print(name + " generated code : " + gencode_result);
- assertEquals(gencode_result, runtime_result);
-}
-
-test(Math.tan, -1.57079632679489660000, "Math.tan");
-test(Math.sin, 6.283185307179586, "Math.sin");
-
diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js
index 438eec979d..536e71bbb5 100644
--- a/deps/v8/test/mjsunit/stack-traces.js
+++ b/deps/v8/test/mjsunit/stack-traces.js
@@ -111,18 +111,6 @@ function testStrippedCustomError() {
throw new CustomError("hep-hey", CustomError);
}
-MyObj = function() { FAIL; }
-
-MyObjCreator = function() {}
-
-MyObjCreator.prototype.Create = function() {
- return new MyObj();
-}
-
-function testClassNames() {
- (new MyObjCreator).Create();
-}
-
// Utility function for testing that the expected strings occur
// in the stack trace produced when running the given function.
function testTrace(name, fun, expected, unexpected) {
@@ -266,8 +254,6 @@ testTrace("testDefaultCustomError", testDefaultCustomError,
["collectStackTrace"]);
testTrace("testStrippedCustomError", testStrippedCustomError, ["hep-hey"],
["new CustomError", "collectStackTrace"]);
-testTrace("testClassNames", testClassNames,
- ["new MyObj", "MyObjCreator.Create"], ["as Create"]);
testCallerCensorship();
testUnintendedCallerCensorship();
testErrorsDuringFormatting();
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index ac039930c3..fd7db28a0d 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -278,8 +278,7 @@ function testOneArrayType(allocator) {
expected_array_value(7));
%DeoptimizeFunction(test_various_loads6);
- %ClearFunctionTypeFeedback(test_various_stores);
- %ClearFunctionTypeFeedback(test_various_loads7);
+ gc();
// Test stores for non-NaN.
var large_array = new allocator(large_array_size);
@@ -377,7 +376,7 @@ delete large_array2[5];
// Convert back to fast elements and make sure the contents of the array are
// unchanged.
large_array2[25] = new Object();
-assertTrue(%HasFastObjectElements(large_array2));
+assertTrue(%HasFastElements(large_array2));
for (var i= 0; i < approx_dict_to_elements_threshold; i += 500 ) {
if (i != 25 && i != 5) {
assertEquals(expected_array_value(i), large_array2[i]);
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 07f760c8d7..2c9bf06f14 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -31,7 +31,6 @@ import os
from os.path import join, exists
import urllib
import hashlib
-import sys
import tarfile
@@ -121,11 +120,7 @@ class Test262TestConfiguration(test.TestConfiguration):
os.remove(archive_name)
raise Exception("Hash mismatch of test data file")
archive = tarfile.open(archive_name, 'r:bz2')
- if sys.platform in ('win32', 'cygwin'):
- # Magic incantation to allow longer path names on Windows.
- archive.extractall(u'\\\\?\\%s' % self.root)
- else:
- archive.extractall(self.root)
+ archive.extractall(join(self.root))
os.rename(join(self.root, 'test262-%s' % revision), directory_name)
def GetBuildRequirements(self):
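
A note on the hunk above: the deleted branch works around Windows'
260-character MAX_PATH limit, which test262's deeply nested tree can
exceed during extraction; prefixing the destination with \\?\ opts the
Win32 APIs into extended-length paths. (The replacement's
join(self.root) takes a single argument, so it is effectively just
extractall(self.root).) A minimal sketch of the removed behavior,
assuming an already-downloaded bz2 tarball:

    import sys
    import tarfile

    def extract_archive(archive_name, root):
        archive = tarfile.open(archive_name, 'r:bz2')
        if sys.platform in ('win32', 'cygwin'):
            # The \\?\ prefix lifts the ~260-character MAX_PATH
            # limit for the deeply nested test262 paths.
            archive.extractall(u'\\\\?\\%s' % root)
        else:
            archive.extractall(root)
        archive.close()
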
diff --git a/deps/v8/tools/check-static-initializers.sh b/deps/v8/tools/check-static-initializers.sh
index 1103a97787..e6da828765 100644
--- a/deps/v8/tools/check-static-initializers.sh
+++ b/deps/v8/tools/check-static-initializers.sh
@@ -37,19 +37,14 @@
expected_static_init_count=3
v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
-
-if [ -n "$1" ] ; then
- d8="${v8_root}/$1"
-else
- d8="${v8_root}/d8"
-fi
+d8="${v8_root}/d8"
if [ ! -f "$d8" ]; then
- echo "d8 binary not found: $d8"
+ echo "Please build the project with SCons."
exit 1
fi
-static_inits=$(nm "$d8" | grep _GLOBAL_ | grep _I_ | awk '{ print $NF; }')
+static_inits=$(nm "$d8" | grep _GLOBAL__I | awk '{ print $NF; }')
static_init_count=$(echo "$static_inits" | wc -l)
@@ -57,7 +52,4 @@ if [ $static_init_count -gt $expected_static_init_count ]; then
echo "Too many static initializers."
echo "$static_inits"
exit 1
-else
- echo "Static initializer check passed ($static_init_count initializers)."
- exit 0
fi
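
Both versions of this script count static initializers by scanning the
d8 symbol table: GCC emits a _GLOBAL__I* symbol per static initializer,
so nm piped through grep is a cheap regression check (the looser
two-stage grep being dropped here also matched variants such as
_GLOBAL__sub_I_*). A rough Python equivalent of the restored pipeline,
for illustration only:

    import subprocess

    def static_initializers(binary):
        # One _GLOBAL__I* symbol per static initializer; the
        # symbol name is the last whitespace-separated field.
        output = subprocess.check_output(['nm', binary])
        return [line.split()[-1] for line in output.splitlines()
                if '_GLOBAL__I' in line]

    # The shell script fails when len(static_initializers('d8'))
    # exceeds expected_static_init_count (3 above).
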
diff --git a/deps/v8/tools/fuzz-harness.sh b/deps/v8/tools/fuzz-harness.sh
deleted file mode 100644
index efbf8646ce..0000000000
--- a/deps/v8/tools/fuzz-harness.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# A simple harness that downloads and runs 'jsfunfuzz' against d8. This
-# takes a long time because it runs many iterations and is intended for
-# automated usage. The package containing 'jsfunfuzz' can be found as an
-# attachment to this bug:
-# https://bugzilla.mozilla.org/show_bug.cgi?id=jsfunfuzz
-
-JSFUNFUZZ_URL="https://bugzilla.mozilla.org/attachment.cgi?id=310631"
-JSFUNFUZZ_MD5="d0e497201c5cd7bffbb1cdc1574f4e32"
-
-v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
-
-if [ -n "$1" ]; then
- d8="${v8_root}/$1"
-else
- d8="${v8_root}/d8"
-fi
-
-if [ ! -f "$d8" ]; then
- echo "Failed to find d8 binary: $d8"
- exit 1
-fi
-
-jsfunfuzz_file="$v8_root/tools/jsfunfuzz.zip"
-if [ ! -f "$jsfunfuzz_file" ]; then
- echo "Downloading $jsfunfuzz_file ..."
- wget -q -O "$jsfunfuzz_file" $JSFUNFUZZ_URL || exit 1
-fi
-
-jsfunfuzz_sum=$(md5sum "$jsfunfuzz_file" | awk '{ print $1 }')
-if [ $jsfunfuzz_sum != $JSFUNFUZZ_MD5 ]; then
- echo "Failed to verify checksum!"
- exit 1
-fi
-
-jsfunfuzz_dir="$v8_root/tools/jsfunfuzz"
-if [ ! -d "$jsfunfuzz_dir" ]; then
- echo "Unpacking into $jsfunfuzz_dir ..."
- unzip "$jsfunfuzz_file" -d "$jsfunfuzz_dir" || exit 1
- echo "Patching runner ..."
- cat << EOF | patch -s -p0 -d "$v8_root"
---- tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py~
-+++ tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py
-@@ -125,7 +125,7 @@
-
- def many_timed_runs():
- iteration = 0
-- while True:
-+ while iteration < 100:
- iteration += 1
- logfilename = "w%d" % iteration
- one_timed_run(logfilename)
-EOF
-fi
-
-flags='--debug-code --expose-gc --verify-gc'
-python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
- "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
-exit_code=$(cat w* | grep " looking good" -c)
-exit_code=$((100-exit_code))
-tar -cjf fuzz-results-$(date +%y%m%d).tar.bz2 err-* w*
-rm -f err-* w*
-
-echo "Total failures: $exit_code"
-exit $exit_code
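
The deleted harness was self-contained: download jsfunfuzz, verify its
MD5, unpack and patch the runner to a bounded iteration count, then run
d8 under the fuzzer and tally failing runs. The checksum step is the
one piece worth a closer look; a streaming equivalent of the md5sum
check (helper name hypothetical):

    import hashlib

    def md5_matches(path, expected_hex):
        # Hash in chunks so the download need not fit in memory.
        digest = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(8192), b''):
                digest.update(chunk)
        return digest.hexdigest() == expected_hex
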
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 407130695a..9977289872 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -27,7 +27,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import cmd
import ctypes
import mmap
import optparse
@@ -37,7 +36,6 @@ import sys
import types
import codecs
import re
-import struct
USAGE="""usage: %prog [OPTION]...
@@ -108,62 +106,6 @@ class Descriptor(object):
return Raw
-def do_dump(reader, heap):
- """Dump all available memory regions."""
- def dump_region(reader, start, size, location):
- print
- while start & 3 != 0:
- start += 1
- size -= 1
- location += 1
- is_executable = reader.IsProbableExecutableRegion(location, size)
- is_ascii = reader.IsProbableASCIIRegion(location, size)
-
- if is_executable is not False:
- lines = reader.GetDisasmLines(start, size)
- for line in lines:
- print FormatDisasmLine(start, heap, line)
- print
-
- if is_ascii is not False:
- # Output in the same format as the Unix hd command
- addr = start
- for slot in xrange(location, location + size, 16):
- hex_line = ""
- asc_line = ""
- for i in xrange(0, 16):
- if slot + i < location + size:
- byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value
- if byte >= 0x20 and byte < 0x7f:
- asc_line += chr(byte)
- else:
- asc_line += "."
- hex_line += " %02x" % (byte)
- else:
- hex_line += " "
- if i == 7:
- hex_line += " "
- print "%s %s |%s|" % (reader.FormatIntPtr(addr),
- hex_line,
- asc_line)
- addr += 16
-
- if is_executable is not True and is_ascii is not True:
- print "%s - %s" % (reader.FormatIntPtr(start),
- reader.FormatIntPtr(start + size))
- for slot in xrange(start,
- start + size,
- reader.PointerSize()):
- maybe_address = reader.ReadUIntPtr(slot)
- heap_object = heap.FindObject(maybe_address)
- print "%s: %s" % (reader.FormatIntPtr(slot),
- reader.FormatIntPtr(maybe_address))
- if heap_object:
- heap_object.Print(Printer())
- print
-
- reader.ForEachMemoryRegion(dump_region)
-
# Set of structures and constants that describe the layout of minidump
# files. Based on MSDN and Google Breakpad.
@@ -502,91 +444,6 @@ class MinidumpReader(object):
location = self.FindLocation(address)
return self.minidump[location:location + size]
- def _ReadWord(self, location):
- if self.arch == MD_CPU_ARCHITECTURE_AMD64:
- return ctypes.c_uint64.from_buffer(self.minidump, location).value
- elif self.arch == MD_CPU_ARCHITECTURE_X86:
- return ctypes.c_uint32.from_buffer(self.minidump, location).value
-
- def IsProbableASCIIRegion(self, location, length):
- ascii_bytes = 0
- non_ascii_bytes = 0
- for loc in xrange(location, location + length):
- byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
- if byte >= 0x7f:
- non_ascii_bytes += 1
- if byte < 0x20 and byte != 0:
- non_ascii_bytes += 1
- if byte < 0x7f and byte >= 0x20:
- ascii_bytes += 1
- if byte == 0xa: # newline
- ascii_bytes += 1
- if ascii_bytes * 10 <= length:
- return False
- if length > 0 and ascii_bytes > non_ascii_bytes * 7:
- return True
- if ascii_bytes > non_ascii_bytes * 3:
- return None # Maybe
- return False
-
- def IsProbableExecutableRegion(self, location, length):
- opcode_bytes = 0
- sixty_four = self.arch == MD_CPU_ARCHITECTURE_AMD64
- for loc in xrange(location, location + length):
- byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
- if (byte == 0x8b or # mov
- byte == 0x89 or # mov reg-reg
- (byte & 0xf0) == 0x50 or # push/pop
- (sixty_four and (byte & 0xf0) == 0x40) or # rex prefix
- byte == 0xc3 or # return
- byte == 0x74 or # jeq
- byte == 0x84 or # jeq far
- byte == 0x75 or # jne
- byte == 0x85 or # jne far
- byte == 0xe8 or # call
- byte == 0xe9 or # jmp far
- byte == 0xeb): # jmp near
- opcode_bytes += 1
- opcode_percent = (opcode_bytes * 100) / length
- threshold = 20
- if opcode_percent > threshold + 2:
- return True
- if opcode_percent > threshold - 2:
- return None # Maybe
- return False
-
- def FindRegion(self, addr):
- answer = [-1, -1]
- def is_in(reader, start, size, location):
- if addr >= start and addr < start + size:
- answer[0] = start
- answer[1] = size
- self.ForEachMemoryRegion(is_in)
- if answer[0] == -1:
- return None
- return answer
-
- def ForEachMemoryRegion(self, cb):
- if self.memory_list64 is not None:
- for r in self.memory_list64.ranges:
- location = self.memory_list64.base_rva + offset
- cb(self, r.start, r.size, location)
- offset += r.size
-
- if self.memory_list is not None:
- for r in self.memory_list.ranges:
- cb(self, r.start, r.memory.data_size, r.memory.rva)
-
- def FindWord(self, word):
- def search_inside_region(reader, start, size, location):
- for loc in xrange(location, location + size):
- if reader._ReadWord(loc) == word:
- slot = start + (loc - location)
- print "%s: %s" % (reader.FormatIntPtr(slot),
- reader.FormatIntPtr(word))
-
- self.ForEachMemoryRegion(search_inside_region)
-
def FindLocation(self, address):
offset = 0
if self.memory_list64 is not None:
@@ -888,10 +745,7 @@ class ConsString(String):
self.right = self.ObjectField(self.RightOffset())
def GetChars(self):
- try:
- return self.left.GetChars() + self.right.GetChars()
- except:
- return "***CAUGHT EXCEPTION IN GROKDUMP***"
+ return self.left.GetChars() + self.right.GetChars()
class Oddball(HeapObject):
@@ -1157,112 +1011,55 @@ CONTEXT_FOR_ARCH = {
['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
}
-class InspectionShell(cmd.Cmd):
- def __init__(self, reader, heap):
- cmd.Cmd.__init__(self)
- self.reader = reader
- self.heap = heap
- self.prompt = "(grok) "
-
- def do_dd(self, address):
- "Interpret memory at the given address (if available)"\
- " as a sequence of words."
- start = int(address, 16)
- for slot in xrange(start,
- start + self.reader.PointerSize() * 10,
- self.reader.PointerSize()):
- maybe_address = self.reader.ReadUIntPtr(slot)
- heap_object = self.heap.FindObject(maybe_address)
- print "%s: %s" % (self.reader.FormatIntPtr(slot),
- self.reader.FormatIntPtr(maybe_address))
- if heap_object:
- heap_object.Print(Printer())
- print
-
- def do_s(self, word):
- "Search for a given word in available memory regions"
- word = int(word, 0)
- print "searching for word", word
- self.reader.FindWord(word)
-
- def do_list(self, smth):
- """List all available memory regions."""
- def print_region(reader, start, size, location):
- print "%s - %s" % (reader.FormatIntPtr(start),
- reader.FormatIntPtr(start + size))
-
- self.reader.ForEachMemoryRegion(print_region)
-
def AnalyzeMinidump(options, minidump_name):
reader = MinidumpReader(options, minidump_name)
- heap = None
DebugPrint("========================================")
if reader.exception is None:
print "Minidump has no exception info"
- else:
- print "Exception info:"
- exception_thread = reader.thread_map[reader.exception.thread_id]
- print " thread id: %d" % exception_thread.id
- print " code: %08X" % reader.exception.exception.code
- print " context:"
- for r in CONTEXT_FOR_ARCH[reader.arch]:
- print " %s: %s" % (r, reader.FormatIntPtr(reader.Register(r)))
- # TODO(vitalyr): decode eflags.
- print " eflags: %s" % bin(reader.exception_context.eflags)[2:]
- print
-
- stack_top = reader.ExceptionSP()
- stack_bottom = exception_thread.stack.start + \
- exception_thread.stack.memory.data_size
- stack_map = {reader.ExceptionIP(): -1}
- for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
- maybe_address = reader.ReadUIntPtr(slot)
- if not maybe_address in stack_map:
- stack_map[maybe_address] = slot
- heap = V8Heap(reader, stack_map)
-
- print "Disassembly around exception.eip:"
- disasm_start = reader.ExceptionIP() - EIP_PROXIMITY
- disasm_bytes = 2 * EIP_PROXIMITY
- if (options.full):
- full_range = reader.FindRegion(reader.ExceptionIP())
- if full_range is not None:
- disasm_start = full_range[0]
- disasm_bytes = full_range[1]
-
- lines = reader.GetDisasmLines(disasm_start, disasm_bytes)
-
- for line in lines:
- print FormatDisasmLine(disasm_start, heap, line)
- print
-
- if heap is None:
- heap = V8Heap(reader, None)
-
- if options.full:
- do_dump(reader, heap)
-
- if options.shell:
- InspectionShell(reader, heap).cmdloop("type help to get help")
- else:
- if reader.exception is not None:
- print "Annotated stack (from exception.esp to bottom):"
- for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
- maybe_address = reader.ReadUIntPtr(slot)
- heap_object = heap.FindObject(maybe_address)
- print "%s: %s" % (reader.FormatIntPtr(slot),
- reader.FormatIntPtr(maybe_address))
- if heap_object:
- heap_object.Print(Printer())
- print
+ return
+ print "Exception info:"
+ exception_thread = reader.thread_map[reader.exception.thread_id]
+ print " thread id: %d" % exception_thread.id
+ print " code: %08X" % reader.exception.exception.code
+ print " context:"
+ for r in CONTEXT_FOR_ARCH[reader.arch]:
+ print " %s: %s" % (r, reader.FormatIntPtr(reader.Register(r)))
+ # TODO(vitalyr): decode eflags.
+ print " eflags: %s" % bin(reader.exception_context.eflags)[2:]
+ print
+
+ stack_top = reader.ExceptionSP()
+ stack_bottom = exception_thread.stack.start + \
+ exception_thread.stack.memory.data_size
+ stack_map = {reader.ExceptionIP(): -1}
+ for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
+ maybe_address = reader.ReadUIntPtr(slot)
+ if not maybe_address in stack_map:
+ stack_map[maybe_address] = slot
+ heap = V8Heap(reader, stack_map)
+
+ print "Disassembly around exception.eip:"
+ start = reader.ExceptionIP() - EIP_PROXIMITY
+ lines = reader.GetDisasmLines(start, 2 * EIP_PROXIMITY)
+ for line in lines:
+ print FormatDisasmLine(start, heap, line)
+ print
+
+ print "Annotated stack (from exception.esp to bottom):"
+ for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
+ maybe_address = reader.ReadUIntPtr(slot)
+ heap_object = heap.FindObject(maybe_address)
+ print "%s: %s" % (reader.FormatIntPtr(slot),
+ reader.FormatIntPtr(maybe_address))
+ if heap_object:
+ heap_object.Print(Printer())
+ print
reader.Dispose()
if __name__ == "__main__":
parser = optparse.OptionParser(USAGE)
- parser.add_option("-s", "--shell", dest="shell", action="store_true")
- parser.add_option("-f", "--full", dest="full", action="store_true")
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
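
The grokdump hunks above strip the interactive InspectionShell and the
region-walking helpers, reverting to the straight-line annotate-and-exit
flow. Worth noting: the removed ForEachMemoryRegion reads offset before
ever assigning it on the memory_list64 path, so the 64-bit branch would
have raised UnboundLocalError as written. The word-reading primitive
underneath those helpers is simple; a standalone sketch, assuming a
bytes-like minidump buffer (names hypothetical):

    import ctypes

    def read_word(minidump, location, is_amd64):
        # Pointer width follows the dump's CPU architecture;
        # from_buffer_copy also accepts read-only buffers.
        word_type = ctypes.c_uint64 if is_amd64 else ctypes.c_uint32
        return word_type.from_buffer_copy(minidump, location).value

    # e.g. read_word(buf, slot, reader.arch == MD_CPU_ARCHITECTURE_AMD64)
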
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index ea82d31814..46f85fe1ff 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -58,22 +58,31 @@
# has some sources to link into the component.
'../../src/v8dll-main.cc',
],
- 'defines': [
- 'V8_SHARED',
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'V8_SHARED',
- 'USING_V8_SHARED',
- ],
- },
'conditions': [
['OS=="mac"', {
'xcode_settings': {
'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
},
}],
+ ['OS=="win"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'USING_V8_SHARED',
+ ],
+ },
+ }, {
+ 'defines': [
+ 'V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'V8_SHARED',
+ ],
+ },
+ }],
['soname_version!=""', {
'product_extension': 'so.<(soname_version)',
}],
@@ -101,16 +110,27 @@
'dependencies': ['mksnapshot', 'js2c'],
}],
['component=="shared_library"', {
- 'defines': [
- 'V8_SHARED',
- 'BUILDING_V8_SHARED',
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'USING_V8_SHARED',
+ ],
+ },
+ }, {
+ 'defines': [
+ 'V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'V8_SHARED',
+ ],
+ },
+ }],
],
- 'direct_dependent_settings': {
- 'defines': [
- 'V8_SHARED',
- 'USING_V8_SHARED',
- ],
- },
}],
],
'dependencies': [
@@ -295,8 +315,6 @@
'../../src/dtoa.h',
'../../src/elements.cc',
'../../src/elements.h',
- '../../src/elements-kind.cc',
- '../../src/elements-kind.h',
'../../src/execution.cc',
'../../src/execution.h',
'../../src/factory.cc',
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index d06cbe47a9..fa559f362c 100644
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
-# Copyright 2012 the V8 project authors. All rights reserved.
+# Copyright 2006-2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -195,14 +195,14 @@ def ReadMacros(lines):
macro_match = MACRO_PATTERN.match(line)
if macro_match:
name = macro_match.group(1)
- args = [match.strip() for match in macro_match.group(2).split(',')]
+ args = map(string.strip, macro_match.group(2).split(','))
body = macro_match.group(3).strip()
macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
else:
python_match = PYTHON_MACRO_PATTERN.match(line)
if python_match:
name = python_match.group(1)
- args = [match.strip() for match in python_match.group(2).split(',')]
+ args = map(string.strip, python_match.group(2).split(','))
body = python_match.group(3).strip()
fun = eval("lambda " + ",".join(args) + ': ' + body)
macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun)))
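
The two ReadMacros hunks are functionally a wash: map(string.strip, ...)
and the list comprehension both trim whitespace around each macro
argument. The map form being restored is the older Python 2 idiom (it
needs import string, whose function versions are gone in Python 3),
which is presumably why the newer code had switched to a comprehension.
Under Python 2 the two agree exactly:

    import string

    arg_text = ' x , y , z '
    via_map = map(string.strip, arg_text.split(','))     # restored idiom
    via_comp = [a.strip() for a in arg_text.split(',')]  # reverted form
    assert via_map == via_comp == ['x', 'y', 'z']
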
diff --git a/deps/v8/tools/jsmin.py b/deps/v8/tools/jsmin.py
index 250dea9d72..e82f3d031e 100644
--- a/deps/v8/tools/jsmin.py
+++ b/deps/v8/tools/jsmin.py
@@ -1,6 +1,6 @@
#!/usr/bin/python2.4
-# Copyright 2012 the V8 project authors. All rights reserved.
+# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -154,7 +154,7 @@ class JavaScriptMinifier(object):
return var_name
while True:
identifier_first_char = self.identifier_counter % 52
- identifier_second_char = self.identifier_counter // 52
+ identifier_second_char = self.identifier_counter / 52
new_identifier = self.CharFromNumber(identifier_first_char)
if identifier_second_char != 0:
new_identifier = (
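
The jsmin change swaps floor division (//) back to plain /. Under
Python 2 both floor for integers, so the revert is behavior-preserving
there; under Python 3, / would produce a float and break the indexing.
The surrounding logic decomposes the identifier counter base-52 into
one- or two-letter names; a self-contained sketch of that scheme (the
exact letter mapping is assumed, not copied from the tool):

    def short_identifier(counter):
        # Split the counter base-52 across the ASCII letters;
        # // floors on both Python 2 and Python 3.
        letters = ('abcdefghijklmnopqrstuvwxyz'
                   'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        first = counter % 52
        second = counter // 52
        name = letters[first]
        if second != 0:
            name = letters[second - 1] + name
        return name

    # short_identifier(0) == 'a'; short_identifier(52) == 'aa'
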
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index a0b81e85f4..c606e42e69 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -303,8 +303,7 @@ class SourceProcessor(SourceFileProcessor):
or (name == 'third_party')
or (name == 'gyp')
or (name == 'out')
- or (name == 'obj')
- or (name == 'DerivedSources'))
+ or (name == 'obj'))
IGNORE_COPYRIGHTS = ['cpplint.py',
'earley-boyer.js',
diff --git a/deps/v8/tools/test-wrapper-gypbuild.py b/deps/v8/tools/test-wrapper-gypbuild.py
index 27b5a3521d..fda4105a98 100755
--- a/deps/v8/tools/test-wrapper-gypbuild.py
+++ b/deps/v8/tools/test-wrapper-gypbuild.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
-# Copyright 2012 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -56,9 +56,6 @@ def BuildOptions():
result.add_option("--no-presubmit",
help='Skip presubmit checks',
default=False, action="store_true")
- result.add_option("--buildbot",
- help='Adapt to path structure used on buildbots',
- default=False, action="store_true")
# Flags this wrapper script handles itself:
result.add_option("-m", "--mode",
@@ -147,16 +144,14 @@ def ProcessOptions(options):
options.mode = options.mode.split(',')
options.arch = options.arch.split(',')
for mode in options.mode:
- if not mode.lower() in ['debug', 'release']:
+ if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
for arch in options.arch:
if not arch in ['ia32', 'x64', 'arm', 'mips']:
print "Unknown architecture %s" % arch
return False
- if options.buildbot:
- # Buildbots run presubmit tests as a separate step.
- options.no_presubmit = True
+
return True
@@ -218,37 +213,28 @@ def Main():
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
- returncodes = 0
if not options.no_presubmit:
print ">>> running presubmit tests"
- returncodes += subprocess.call([workspace + '/tools/presubmit.py'])
+ subprocess.call([workspace + '/tools/presubmit.py'])
- args_for_children = ['python']
- args_for_children += [workspace + '/tools/test.py'] + PassOnOptions(options)
+ args_for_children = [workspace + '/tools/test.py'] + PassOnOptions(options)
args_for_children += ['--no-build', '--build-system=gyp']
for arg in args:
args_for_children += [arg]
+ returncodes = 0
env = os.environ
for mode in options.mode:
for arch in options.arch:
print ">>> running tests for %s.%s" % (arch, mode)
- if options.buildbot:
- shellpath = workspace + '/' + options.outdir + '/' + mode
- mode = mode.lower()
- else:
- shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode
+ shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode
env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
shell = shellpath + "/d8"
- cmdline = ' '.join(args_for_children +
- ['--arch=' + arch] +
- ['--mode=' + mode] +
- ['--shell=' + shell])
- # TODO(jkummerow): This print is temporary.
- print "Executing: %s" % cmdline
-
- child = subprocess.Popen(cmdline,
+ child = subprocess.Popen(' '.join(args_for_children +
+ ['--arch=' + arch] +
+ ['--mode=' + mode] +
+ ['--shell=' + shell]),
shell=True,
cwd=workspace,
env=env)